code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : str = 0
@slow
def lowercase_ ( self ) -> Optional[Any]:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowerCAmelCase_ : int = AutoTokenizer.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__lowercase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowerCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__lowercase ) , 0 )
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : int = AutoTokenizer.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : Optional[Any] = AutoConfig.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
# Check that tokenizer_type ≠ model_type
lowerCAmelCase_ : Dict = AutoTokenizer.from_pretrained(__lowercase , config=__lowercase )
self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowercase_ ( self ) -> int:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowercase , '''vocab.txt''' ) )
lowerCAmelCase_ : Any = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type='''bert''' , use_fast=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowercase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowercase , '''merges.txt''' ) )
lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type='''gpt2''' , use_fast=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@require_tokenizers
def lowercase_ ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowercase , '''vocab.txt''' ) )
lowerCAmelCase_ : str = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type='''bert''' )
self.assertIsInstance(__lowercase , __lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowercase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowercase , '''merges.txt''' ) )
lowerCAmelCase_ : str = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type='''gpt2''' )
self.assertIsInstance(__lowercase , __lowercase )
def lowercase_ ( self ) -> Dict:
with pytest.raises(__lowercase ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowercase_ ( self ) -> int:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowerCAmelCase_ : List[Any] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast) )
if isinstance(__lowercase , __lowercase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __lowercase )
else:
self.assertEqual(tokenizer.do_lower_case , __lowercase )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def lowercase_ ( self ) -> Optional[int]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__lowercase , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
lowerCAmelCase_ : Any = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowercase_ ( self ) -> List[Any]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
lowerCAmelCase_ : Optional[int] = TOKENIZER_MAPPING.values()
lowerCAmelCase_ : List[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__lowercase )
@require_tokenizers
def lowercase_ ( self ) -> Dict:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=__lowercase ) , __lowercase )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , __lowercase )
@require_tokenizers
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=__lowercase )
lowerCAmelCase_ : Optional[Any] = '''Hello, world. How are you?'''
lowerCAmelCase_ : List[str] = tokenizer.tokenize(__lowercase )
self.assertEqual('''[UNK]''' , tokens[0] )
lowerCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=__lowercase )
lowerCAmelCase_ : List[str] = tokenizer.tokenize(__lowercase )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ : List[str] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(__lowercase ) , __lowercase )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : str = AutoTokenizer.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : int = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__lowercase , __lowercase )
def lowercase_ ( self ) -> Optional[Any]:
# Check we can load the tokenizer config of an online model.
lowerCAmelCase_ : Union[str, Any] = get_tokenizer_config('''bert-base-cased''' )
lowerCAmelCase_ : Union[str, Any] = config.pop('''_commit_hash''' , __lowercase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__lowercase , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowerCAmelCase_ : List[str] = get_tokenizer_config(__lowercase )
self.assertDictEqual(__lowercase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowerCAmelCase_ : Dict = AutoTokenizer.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : List[str] = get_tokenizer_config(__lowercase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowercase_ ( self ) -> Union[str, Any]:
try:
AutoConfig.register('''custom''' , __lowercase )
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase ):
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase )
lowerCAmelCase_ : Optional[Any] = CustomTokenizer.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase_ ( self ) -> List[str]:
try:
AutoConfig.register('''custom''' , __lowercase )
# Can register in two steps
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__lowercase , fast_tokenizer_class=__lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__lowercase , slow_tokenizer_class=__lowercase , fast_tokenizer_class=__lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase ):
AutoTokenizer.register(__lowercase , fast_tokenizer_class=__lowercase )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ : Union[str, Any] = BertTokenizerFast.from_pretrained(__lowercase )
bert_tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : List[Any] = CustomTokenizerFast.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : Dict = AutoTokenizer.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : str = AutoTokenizer.from_pretrained(__lowercase , use_fast=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowercase ):
lowerCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowercase ):
lowerCAmelCase_ : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase )
lowerCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(__lowercase , trust_remote_code=__lowercase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase_ : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase , use_fast=__lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(__lowercase , trust_remote_code=__lowercase , use_fast=__lowercase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowercase_ ( self ) -> Tuple:
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = False
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = NewTokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] = False
try:
AutoConfig.register('''custom''' , __lowercase )
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase )
AutoTokenizer.register(__lowercase , fast_tokenizer_class=__lowercase )
# If remote code is not set, the default is to use local
lowerCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=__lowercase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowerCAmelCase_ : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase , use_fast=__lowercase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowerCAmelCase_ : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
lowerCAmelCase_ : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase , use_fast=__lowercase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowercase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowercase , use_fast=__lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowercase_ ( self ) -> int:
with self.assertRaisesRegex(
__lowercase , '''bert-base is not a local folder and is not a valid model identifier''' ):
lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained('''bert-base''' )
def lowercase_ ( self ) -> Optional[int]:
with self.assertRaisesRegex(
__lowercase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(__lowercase , revision='''aaaaaa''' )
def lowercase_ ( self ) -> str:
# Make sure we have cached the tokenizer.
lowerCAmelCase_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
lowerCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 721 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_UpperCAmelCase : Tuple =10
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int:
for i in range(lowerCAmelCase_ , lowerCAmelCase_ ):
if array[i] == target:
return i
return -1
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> int:
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : int = len(lowerCAmelCase_ )
while left <= right:
if right - left < precision:
return lin_search(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : List[Any] = (left + right) // 3 + 1
lowerCAmelCase_ : List[str] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
lowerCAmelCase_ : Dict = one_third - 1
elif array[two_third] < target:
lowerCAmelCase_ : List[Any] = two_third + 1
else:
lowerCAmelCase_ : Union[str, Any] = one_third + 1
lowerCAmelCase_ : Tuple = two_third - 1
else:
return -1
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int:
if left < right:
if right - left < precision:
return lin_search(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : Union[str, Any] = (left + right) // 3 + 1
lowerCAmelCase_ : Optional[int] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(lowerCAmelCase_ , one_third - 1 , lowerCAmelCase_ , lowerCAmelCase_ )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , lowerCAmelCase_ , lowerCAmelCase_ )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase : Tuple =input("""Enter numbers separated by comma:\n""").strip()
_UpperCAmelCase : Union[str, Any] =[int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
_UpperCAmelCase : int =int(input("""Enter the number to be found in the list:\n""").strip())
_UpperCAmelCase : Optional[Any] =ite_ternary_search(collection, target)
_UpperCAmelCase : List[str] =rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f"""Iterative search: {target} found at positions: {resulta}""")
print(f"""Recursive search: {target} found at positions: {resulta}""")
else:
print("""Not found""") | 619 | 0 |
def lowerCAmelCase ( lowerCAmelCase_ = 2_000_000 )-> int:
lowerCAmelCase_ : List[Any] = [0 for i in range(n + 1 )]
lowerCAmelCase_ : List[str] = 1
lowerCAmelCase_ : Optional[int] = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , lowerCAmelCase_ ):
lowerCAmelCase_ : str = 1
lowerCAmelCase_ : Tuple = 0
for i in range(lowerCAmelCase_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""") | 700 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : Union[str, Any] ={
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[Any] =["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] =["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple =[
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
_UpperCAmelCase : List[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 619 | 0 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=1_3 , __lowercase=3_2 , __lowercase=2 , __lowercase=3 , __lowercase=1_6 , __lowercase=[3_2, 6_4, 1_2_8] , __lowercase=[1, 2, 1] , __lowercase=[2, 2, 4] , __lowercase=2 , __lowercase=2.0 , __lowercase=True , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.1 , __lowercase="gelu" , __lowercase=False , __lowercase=True , __lowercase=0.02 , __lowercase=1e-5 , __lowercase=True , __lowercase=None , __lowercase=True , __lowercase=1_0 , __lowercase=8 , __lowercase=["stage1", "stage2"] , __lowercase=[1, 2] , ) -> Union[str, Any]:
lowerCAmelCase_ : Union[str, Any] = parent
lowerCAmelCase_ : Optional[int] = batch_size
lowerCAmelCase_ : Union[str, Any] = image_size
lowerCAmelCase_ : Tuple = patch_size
lowerCAmelCase_ : Union[str, Any] = num_channels
lowerCAmelCase_ : Optional[int] = embed_dim
lowerCAmelCase_ : int = hidden_sizes
lowerCAmelCase_ : Any = depths
lowerCAmelCase_ : Optional[Any] = num_heads
lowerCAmelCase_ : Optional[int] = window_size
lowerCAmelCase_ : Union[str, Any] = mlp_ratio
lowerCAmelCase_ : List[str] = qkv_bias
lowerCAmelCase_ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase_ : Any = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[int] = drop_path_rate
lowerCAmelCase_ : Optional[Any] = hidden_act
lowerCAmelCase_ : str = use_absolute_embeddings
lowerCAmelCase_ : Tuple = patch_norm
lowerCAmelCase_ : Optional[int] = layer_norm_eps
lowerCAmelCase_ : Tuple = initializer_range
lowerCAmelCase_ : List[str] = is_training
lowerCAmelCase_ : Tuple = scope
lowerCAmelCase_ : Tuple = use_labels
lowerCAmelCase_ : int = type_sequence_label_size
lowerCAmelCase_ : Any = encoder_stride
lowerCAmelCase_ : Dict = out_features
lowerCAmelCase_ : List[str] = out_indices
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Dict = None
if self.use_labels:
lowerCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : Tuple = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self ) -> Union[str, Any]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> List[str]:
lowerCAmelCase_ : Optional[Any] = FocalNetModel(config=__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : Dict = model(__lowercase )
lowerCAmelCase_ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase_ : int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Tuple:
lowerCAmelCase_ : str = FocalNetBackbone(config=__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : str = model(__lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Dict = FocalNetBackbone(config=__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : str = model(__lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Tuple:
lowerCAmelCase_ : Dict = FocalNetForMaskedImageModeling(config=__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : str = model(__lowercase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase_ : Union[str, Any] = 1
lowerCAmelCase_ : Optional[int] = FocalNetForMaskedImageModeling(__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase_ : Any = model(__lowercase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Dict:
lowerCAmelCase_ : Optional[Any] = self.type_sequence_label_size
lowerCAmelCase_ : Dict = FocalNetForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase_ : Optional[int] = 1
lowerCAmelCase_ : List[str] = FocalNetForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase_ : Any = model(__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ : List[str] = config_and_inputs
lowerCAmelCase_ : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
    """Common model tests for FocalNet (model / backbone / MIM / classification).

    NOTE(review): the original declared the same base twice
    (``UpperCAmelCase__, UpperCAmelCase__``), which raises ``TypeError:
    duplicate base class`` at class creation; presumably these were two
    distinct tester mixins before the file was mangled — confirm which.
    Internal names below are restored so the class actually runs: the
    original collapsed every assignment target into one mangled name and
    gave every method the same name, so only the last definition survived.
    """

    # The backbone is deliberately last: tests that iterate
    # `all_model_classes[:-1]` skip it (it lacks heads/embedding accessors).
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )

    # NOTE(review): five boolean class flags were mangled to one name; these
    # are the standard tester-mixin switches — confirm against the mixin.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Intentionally a no-op; called from test_config above.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason='''FocalNet does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''FocalNet does not use feedforward chunking''')
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        # Skip the backbone (last entry): no embedding accessors.
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        """Run one forward pass and validate hidden_states / reshaped_hidden_states shapes."""
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim]
        )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim]
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # Force a patch size that does not divide the image size, so padding kicks in.
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )
@require_vision
@require_torch
class snake_case__( unittest.TestCase ):
    """Slow integration test: FocalNet-tiny classification head on a COCO sample."""

    @cached_property
    def default_image_processor(self):
        # TODO update organization
        # Name restored: the test body reads `self.default_image_processor`.
        return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''').to(torch_device)
        image_processor = self.default_image_processor
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        # Fixed: original used assertTrue(value, 281) — a tautology where 281
        # was silently treated as the failure message. Assert the class id.
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = (FocalNetBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Optional[Any] = FocalNetConfig
SCREAMING_SNAKE_CASE__ : str = False
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : Tuple = FocalNetModelTester(self ) | 701 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
# NOTE(review): the assignment target looks mangled. In the upstream diffusers
# test this module-level statement is `torch.backends.cuda.matmul.allow_tf32 = False`
# (disable TF32 so CPU/GPU results are comparable) — confirm before relying on it.
_UpperCAmelCase : Any =False
class snake_case__( unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for the VQDiffusion pipeline.

    Names restored from the mangled original: every assignment target had been
    collapsed into one name and all properties shared one method name, leaving
    `self.num_embed`, `self.dummy_vqvae`, etc. undefined.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            '''attention_bias''': True,
            '''cross_attention_dim''': 32,
            '''attention_head_dim''': height * width,
            '''num_attention_heads''': 1,
            '''num_vector_embeds''': self.num_embed,
            '''num_embeds_ada_norm''': self.num_embeds_ada_norm,
            '''norm_num_groups''': 32,
            '''sample_size''': width,
            '''activation_fn''': '''geglu-approximate''',
        }
        model = TransformeraDModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        """Pipeline output (dict and tuple paths) matches a recorded slice; no CFG."""
        device = '''cpu'''
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = '''teddy bear playing in the pool'''
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='''np''')
        image = output.images
        # Same seed again so the tuple-returning path must reproduce the image.
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='''np''', return_dict=False, num_inference_steps=2
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        """Same check with learnable classifier-free-sampling embeddings enabled."""
        device = '''cpu'''
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = '''teddy bear playing in the pool'''
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='''np''')
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='''np''', return_dict=False, num_inference_steps=2
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
        # NOTE(review): 2.0 here (vs 1e-2 below) mirrors the source as given — confirm.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class snake_case__( unittest.TestCase ):
    """Slow GPU integration test: vq-diffusion-ithq output vs a reference image."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy'''
        )
        pipeline = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''')
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            '''teddy bear playing in the pool''',
            num_images_per_prompt=1,
            generator=generator,
            output_type='''np''',
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        # Loose tolerance: pixel values are in [0, 255]-ish space here.
        assert np.abs(expected_image - image).max() < 2.0
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash:
    """Pure-Python SHA-1 over a bytes message (reference implementation).

    Restored from the mangled original: the per-round ``a, b, c, d, e``
    tuple unpacking had been collapsed into a single assignment target
    (NameError on ``b``), and the method names here are the ones the class
    itself calls (``self.rotate``, ``self.padding``, ...). The class is
    named ``SHAaHash`` because that is the name the module-level test and
    ``main`` below refer to.
    """

    def __init__(self, data):
        # data: the bytes message to hash.
        self.data = data
        # SHA-1 initial state words (FIPS 180 / RFC 3174).
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit word *n* by *b* bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Return the message padded to a multiple of 64 bytes (0x80, zeros, 64-bit length)."""
        padding = b'''\x80''' + b'''\x00''' * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack('''>Q''', 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Split the padded message into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand one 64-byte block into the 80-word message schedule."""
        w = list(struct.unpack('''>16L''', block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the compression function over all blocks and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                # Round-dependent boolean function f and constant k.
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


# Backward-compatible alias for the original (mangled) class name.
snake_case__ = SHAaHash
def lowerCAmelCase():
    """Self-test: the pure-Python digest must match hashlib's C implementation.

    Fixed: the original called ``hashlib.shaa`` (a mangled, nonexistent
    attribute) — the reference implementation is ``hashlib.sha1``.
    """
    msg = b'''Test String'''
    # sha1 here is used only as a test oracle, not for security purposes.
    assert SHAaHash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
def main():
    """Parse CLI args and print the SHA-1 digest of a string or a file's contents.

    Renamed from the mangled ``lowerCAmelCase`` to ``main`` because the
    ``__main__`` guard below calls ``main()``.
    """
    parser = argparse.ArgumentParser(description='''Process some strings or files''')
    parser.add_argument(
        '''--string''',
        dest='''input_string''',
        default='''Hello World!! Welcome to Cryptography''',
        help='''Hash the string''',
    )
    parser.add_argument('''--file''', dest='''input_file''', help='''Hash contents of a file''')
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, '''rb''') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, '''utf-8''')
    print(SHAaHash(hash_input).final_hash())
if __name__ == "__main__":
    main()

    # Doctests previously ran unconditionally at module level (i.e. on every
    # import); keep them behind the script guard.
    import doctest

    doctest.testmod()
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # sentencepiece is optional; expose None so `slow_tokenizer_class`
    # in the fast tokenizer below still resolves.
    XLNetTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
    },
}

# No fixed positional-embedding size for these checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}

# SentencePiece's whitespace marker.
SPIECE_UNDERLINE = """▁"""

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class snake_case__( PreTrainedTokenizerFast ):
    """Fast XLNet tokenizer backed by HuggingFace *tokenizers*.

    Restored from the mangled original: the base was the undefined name
    ``UpperCAmelCase__`` (``PreTrainedTokenizerFast``, imported above, is the
    only plausible base — confirm); the five class attributes all shared one
    mangled name so only the last survived; and ``__init__`` repeated one
    parameter name thirteen times, which is a SyntaxError.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = """left"""
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        )
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Saving the slow vocabulary requires the original sentencepiece file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: ``X <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Token-type ids: 0 for sequence A, 1 for sequence B, 2 for the CLS segment."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece vocab file into *save_directory*; return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.'''
            )
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    '''files''',
    [
        ['''full:README.md''', '''dataset_infos.json'''],
        ['''empty:README.md''', '''dataset_infos.json'''],
        ['''dataset_infos.json'''],
        ['''full:README.md'''],
    ],
)
def test_from_dir(files, tmp_path_factory):
    """DatasetInfosDict.from_directory reads size from README.md and/or dataset_infos.json.

    Signature restored: the original reused one mangled name for both
    parameters (a SyntaxError) and the parametrized argument must be named
    ``files`` to match the decorator.
    """
    dataset_infos_dir = tmp_path_factory.mktemp('''dset_infos_dir''')
    if "full:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''') as f:
            f.write('''---\ndataset_info:\n dataset_size: 42\n---''')
    if "empty:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''') as f:
            f.write('''''')
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / '''dataset_infos.json''', '''w''') as f:
            f.write('''{"default": {"dataset_size": 42}}''')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    '''dataset_info''',
    [
        DatasetInfo(),
        DatasetInfo(
            description='''foo''',
            features=Features({'''a''': Value('''int32''')}),
            builder_name='''builder''',
            config_name='''config''',
            version='''1.0.0''',
            splits=[{'''name''': '''train'''}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    """A DatasetInfo written to a directory reloads equal and creates dataset_info.json."""
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, '''dataset_info.json'''))
def test_dataset_info_to_yaml_dict():
    """_to_yaml_dict keeps exactly the YAML-included fields and round-trips through yaml."""
    dataset_info = DatasetInfo(
        description='''foo''',
        citation='''bar''',
        homepage='''https://foo.bar''',
        license='''CC0''',
        features=Features({'''a''': Value('''int32''')}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name='''builder''',
        config_name='''config''',
        version='''1.0.0''',
        splits=[{'''name''': '''train''', '''num_examples''': 42}],
        download_checksums={},
        download_size=1_337,
        post_processing_size=442,
        dataset_size=1_234,
        size_in_bytes=1_337 + 442 + 1_234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    """An empty DatasetInfo serializes to an empty YAML dict."""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    '''dataset_infos_dict''',
    [
        DatasetInfosDict(),
        DatasetInfosDict({'''default''': DatasetInfo()}),
        DatasetInfosDict({'''my_config_name''': DatasetInfo()}),
        DatasetInfosDict(
            {
                '''default''': DatasetInfo(
                    description='''foo''',
                    features=Features({'''a''': Value('''int32''')}),
                    builder_name='''builder''',
                    config_name='''config''',
                    version='''1.0.0''',
                    splits=[{'''name''': '''train'''}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                '''v1''': DatasetInfo(dataset_size=42),
                '''v2''': DatasetInfo(dataset_size=1_337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    """DatasetInfosDict round-trips through a directory (modulo YAML-excluded fields)."""
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, '''README.md'''))
import math
import qiskit
def quantum_full_adder(input_1=1, input_2=1, carry_in=1):
    """Simulate a 1-bit quantum full adder and return the measurement counts.

    Each input may be 0, 1, or 2 (2 prepares the qubit in superposition via a
    Hadamard gate). Restored from the mangled original, whose type checks had
    degenerated to ``isinstance(x, x)`` and whose name no longer matched the
    ``quantum_full_adder(1, 1, 1)`` call in the ``__main__`` guard.

    Raises:
        TypeError: if any input is a string.
        ValueError: if any input is negative, non-integral, or greater than 2.
    """
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('''inputs must be integers.''')
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''')
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''')
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''')
    # build registers
    qr = qiskit.QuantumRegister(4, '''qr''')
    cr = qiskit.ClassicalRegister(2, '''cr''')
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''')
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
    # Demo run: all three inputs set to 1 (expected classical result: carry=1, sum=1).
    # NOTE(review): relies on the adder above being named `quantum_full_adder` — confirm.
    print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Largest per-device batch size before falling back to gradient accumulation,
# and the fixed evaluation batch size. Names restored: the training loop below
# reads MAX_GPU_BATCH_SIZE and EVAL_BATCH_SIZE.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build train/eval DataLoaders for GLUE MRPC tokenized with bert-base-cased.

    Restored from the mangled original, whose def repeated one parameter name
    (a SyntaxError) and lost every local binding. The name ``get_dataloaders``
    matches the mocked override in the TESTING_MOCKED_DATALOADERS block below.

    Args:
        accelerator: the Accelerate ``Accelerator`` driving this run.
        batch_size: per-device training batch size.
    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = load_dataset('''glue''', '''mrpc''')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['''idx''', '''sentence1''', '''sentence2'''],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding='''longest''',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='''pt''',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # Re-bind the real builder to the lightweight mock; F811 is the
    # "redefinition" warning this deliberately triggers.
    get_dataloaders = mocked_dataloaders  # noqa: F811
def lowerCAmelCase ( config , args )-> None:
    """Fine-tune bert-base-cased on GLUE/MRPC with 🤗 Accelerate, optionally
    logging metrics to every experiment tracker found in the environment.

    Args:
        config: hyper-parameter dict with keys "lr", "num_epochs", "seed",
            "batch_size".
        args: parsed CLI namespace providing cpu, mixed_precision,
            with_tracking and project_dir.

    BUGFIX: the obfuscated original repeated one parameter name (a
    SyntaxError) and assigned every value to the single name `lowerCAmelCase_`
    while later statements read the real names (`accelerator`, `model`,
    `optimizer`, ...), all NameErrors.  The canonical bindings are restored.
    NOTE(review): Accelerator, set_seed, get_dataloaders, evaluate, AdamW,
    get_linear_schedule_with_warmup, AutoModelForSequenceClassification,
    DistributedType and MAX_GPU_BATCH_SIZE must be imported/defined earlier
    in this file — confirm against the file header.
    """
    # For testing only: shorten the run when dataloaders are mocked.
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
        config['''num_epochs'''] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    metric = evaluate.load('''glue''' , '''mrpc''' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__ )[-1].split('''.''' )[0]
        accelerator.init_trackers(run , config )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""" , eval_metric )
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    '''accuracy''': eval_metric['''accuracy'''],
                    '''f1''': eval_metric['''f1'''],
                    '''train_loss''': total_loss.item() / len(train_dataloader ),
                    '''epoch''': epoch,
                } , step=epoch , )
    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def lowerCAmelCase ( )-> None:
    """CLI entry point: parse arguments and launch training with the default
    MRPC hyper-parameters."""
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' ,
        type=str ,
        default=None ,  # BUGFIX: default/type were the undefined name `lowerCAmelCase_`
        choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,
        help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    parser.add_argument(
        '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
    parser.add_argument(
        '''--project_dir''' , type=str , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    # NOTE(review): `training_function` is the trainer defined above; the
    # obfuscation renamed its def to `lowerCAmelCase` — confirm the call target.
    training_function(config , args )


if __name__ == "__main__":
    # BUGFIX: the guard called `main()`, which is not defined in this module;
    # call the entry point defined above instead.
    lowerCAmelCase()
import re
def lowerCAmelCase ( phone )-> bool:
    """Return True iff *phone* is a valid Indian mobile phone number.

    Accepts an optional "+91" prefix (followed by an optional hyphen/space),
    an optional leading 0 or 91, then a 10-digit number starting with 7, 8
    or 9.

    BUGFIX: the body previously compared against the undefined name `phone`
    while the parameter was mangled; the parameter now carries that name.
    """
    pattern = re.compile(r'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
    match = pattern.search(phone )
    # The pattern is fully anchored, so a hit implies the whole string matched.
    return match is not None and match.string == phone


if __name__ == "__main__":
    # BUGFIX: previously called `indian_phone_validator`, which is not defined
    # in this module.
    print(lowerCAmelCase("""+918827897895""" ))
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger.
# NOTE(review): every module constant below was renamed to `_UpperCAmelCase`,
# so each assignment clobbers the previous one and the tokenizer class below
# cannot see the earlier tables — the original names (VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) need
# to be restored.
_UpperCAmelCase : int =logging.get_logger(__name__)
# Relative filenames under which tokenizer artifacts are saved/loaded.
_UpperCAmelCase : List[Any] ={
    """vocab_file""": """vocab.json""",
    """merges_file""": """merges.txt""",
    """tokenizer_config_file""": """tokenizer_config.json""",
}
# Hub URLs of the pretrained blenderbot_small-90M tokenizer artifacts.
_UpperCAmelCase : Optional[int] ={
    """vocab_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
    },
    """merges_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
    },
    """tokenizer_config_file""": {
        """facebook/blenderbot_small-90M""": (
            """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
        )
    },
}
# Maximum model input length (in tokens) per pretrained checkpoint.
_UpperCAmelCase : Union[str, Any] ={"""facebook/blenderbot_small-90M""": 512}
def lowerCAmelCase ( lowerCAmelCase_ )-> Any:
lowerCAmelCase_ : Optional[Any] = set()
lowerCAmelCase_ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase_ : int = char
lowerCAmelCase_ : str = set(lowerCAmelCase_ )
return pairs
class snake_case__( UpperCAmelCase__ ):
    """Byte-pair-encoding (BPE) tokenizer backed by a JSON vocabulary and a
    merges.txt file (BlenderbotSmall style: lowercases input, spaces out
    punctuation, and marks word-internal merge boundaries with "@@ ").

    NOTE(review): obfuscation renamed parameters/locals throughout this
    class.  Several defs below repeat `__lowercase` as a parameter name
    (a SyntaxError), and many statements assign to `lowerCAmelCase_` while
    later lines read the original names (`token`, `pairs`, `word`, `first`,
    `second`, `index`, ...).  The original bindings must be restored before
    this class can run.
    """
    SCREAMING_SNAKE_CASE__ : int = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""input_ids""", """attention_mask"""]

    # Loads vocab (token -> id), builds the reverse map, and parses BPE merge
    # ranks from the merges file; the last dict is a token -> BPE-string cache.
    def __init__( self , __lowercase , __lowercase , __lowercase="__start__" , __lowercase="__end__" , __lowercase="__unk__" , __lowercase="__null__" , **__lowercase , ) -> Optional[Any]:
        super().__init__(unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , **__lowercase )
        with open(__lowercase , encoding='''utf-8''' ) as vocab_handle:
            lowerCAmelCase_ : str = json.load(__lowercase )
        lowerCAmelCase_ : Any = {v: k for k, v in self.encoder.items()}
        with open(__lowercase , encoding='''utf-8''' ) as merges_handle:
            lowerCAmelCase_ : Dict = merges_handle.read().split('''\n''' )[1:-1]
        lowerCAmelCase_ : Dict = [tuple(merge.split() ) for merge in merges]
        lowerCAmelCase_ : int = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
        # BPE result cache.
        lowerCAmelCase_ : Optional[Any] = {}

    @property
    def lowercase_ ( self ) -> int:
        # Base vocabulary size (excludes tokens added after loading).
        return len(self.encoder )

    def lowercase_ ( self ) -> Dict:
        # Full vocabulary including added tokens.
        return dict(self.encoder , **self.added_tokens_encoder )

    # Applies BPE merging to one whitespace-delimited token and caches the
    # result; merge boundaries inside a word are rendered as "@@ ".
    def lowercase_ ( self , __lowercase ) -> str:
        if token in self.cache:
            return self.cache[token]
        # Space out punctuation and apostrophes, collapse runs of whitespace.
        lowerCAmelCase_ : int = re.sub('''([.,!?()])''' , R''' \1''' , __lowercase )
        lowerCAmelCase_ : List[str] = re.sub('''(\')''' , R''' \1 ''' , __lowercase )
        lowerCAmelCase_ : Optional[int] = re.sub(R'''\s{2,}''' , ''' ''' , __lowercase )
        if "\n" in token:
            lowerCAmelCase_ : Any = token.replace('''\n''' , ''' __newln__''' )
        lowerCAmelCase_ : List[str] = token.split(''' ''' )
        lowerCAmelCase_ : List[Any] = []
        for token in tokens:
            if not len(__lowercase ):
                continue
            lowerCAmelCase_ : Union[str, Any] = token.lower()
            lowerCAmelCase_ : Optional[int] = tuple(__lowercase )
            # Mark end-of-word on the final symbol before merging.
            lowerCAmelCase_ : str = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
            lowerCAmelCase_ : Optional[int] = get_pairs(__lowercase )
            if not pairs:
                words.append(__lowercase )
                continue
            # Repeatedly merge the lowest-ranked adjacent pair.
            while True:
                lowerCAmelCase_ : int = min(__lowercase , key=lambda __lowercase : self.bpe_ranks.get(__lowercase , float('''inf''' ) ) )
                if bigram not in self.bpe_ranks:
                    break
                lowerCAmelCase_ : Dict = bigram
                lowerCAmelCase_ : Union[str, Any] = []
                lowerCAmelCase_ : str = 0
                while i < len(__lowercase ):
                    try:
                        lowerCAmelCase_ : Optional[Any] = word.index(__lowercase , __lowercase )
                        new_word.extend(word[i:j] )
                        lowerCAmelCase_ : str = j
                    except ValueError:
                        new_word.extend(word[i:] )
                        break
                    if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second:
                        new_word.append(first + second )
                        i += 2
                    else:
                        new_word.append(word[i] )
                        i += 1
                lowerCAmelCase_ : Union[str, Any] = tuple(__lowercase )
                lowerCAmelCase_ : Union[str, Any] = new_word
                if len(__lowercase ) == 1:
                    break
                else:
                    lowerCAmelCase_ : Dict = get_pairs(__lowercase )
            # Render merge points as "@@ " and drop the trailing "</w>".
            lowerCAmelCase_ : Dict = '''@@ '''.join(__lowercase )
            lowerCAmelCase_ : Optional[Any] = word[:-4]
            lowerCAmelCase_ : Optional[int] = word
            words.append(__lowercase )
        return " ".join(__lowercase )

    # Splits text on whitespace and expands each piece through the BPE step.
    def lowercase_ ( self , __lowercase ) -> List[str]:
        lowerCAmelCase_ : Optional[int] = []
        lowerCAmelCase_ : List[Any] = re.findall(R'''\S+\n?''' , __lowercase )
        for token in words:
            split_tokens.extend(list(self.bpe(__lowercase ).split(''' ''' ) ) )
        return split_tokens

    # token -> id (lowercased; unknown tokens map to the unk id).
    def lowercase_ ( self , __lowercase ) -> int:
        lowerCAmelCase_ : int = token.lower()
        return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) )

    # id -> token.
    def lowercase_ ( self , __lowercase ) -> str:
        return self.decoder.get(__lowercase , self.unk_token )

    # Joins tokens back into text, removing the "@@ " merge markers.
    def lowercase_ ( self , __lowercase ) -> str:
        lowerCAmelCase_ : int = ''' '''.join(__lowercase ).replace('''@@ ''' , '''''' ).strip()
        return out_string

    # Writes vocab.json and merges.txt into `save_directory`.
    def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
        if not os.path.isdir(__lowercase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase_ : Dict = os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCAmelCase_ : Tuple = os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowercase , ensure_ascii=__lowercase ) + '''\n''' )
        lowerCAmelCase_ : Optional[int] = 0
        with open(__lowercase , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            # NOTE(review): the sort key references `kv[1]` but the lambda's
            # parameter was renamed to `__lowercase` — it should read
            # `lambda kv: kv[1]` (sort merges by rank).
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowercase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    lowerCAmelCase_ : Tuple = token_index
                writer.write(''' '''.join(__lowercase ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCAmelCase : Any =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
    """Image processor: optional shortest-edge resize (with crop-percentage
    handling below 384px), rescaling, and channel-wise normalization.

    NOTE(review): obfuscation renamed parameters/locals throughout.  Each def
    below repeats `__lowercase` as a parameter name (a SyntaxError), and the
    bodies read names (`size`, `crop_pct`, `shortest_edge`, ...) that are
    never bound — the original signatures/bindings need restoring.
    """
    SCREAMING_SNAKE_CASE__ : Dict = ["""pixel_values"""]

    # Stores the preprocessing defaults; ImageNet mean/std are used when no
    # normalization statistics are supplied.
    def __init__( self , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 2_5_5 , __lowercase = True , __lowercase = None , __lowercase = None , **__lowercase , ) -> None:
        super().__init__(**__lowercase )
        lowerCAmelCase_ : Dict = size if size is not None else {'''shortest_edge''': 3_8_4}
        lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
        lowerCAmelCase_ : List[Any] = do_resize
        lowerCAmelCase_ : Optional[int] = size
        # Default value set here for backwards compatibility where the value in config is None
        lowerCAmelCase_ : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
        lowerCAmelCase_ : Tuple = resample
        lowerCAmelCase_ : Optional[int] = do_rescale
        lowerCAmelCase_ : Any = rescale_factor
        lowerCAmelCase_ : List[str] = do_normalize
        lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        lowerCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD

    # Resize: below 384 the shortest edge is scaled by 1/crop_pct then
    # center-cropped; at 384+ the image is warped straight to a square.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ) -> np.ndarray:
        lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
        if "shortest_edge" not in size:
            raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
        lowerCAmelCase_ : Optional[int] = size['''shortest_edge''']
        if shortest_edge < 3_8_4:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            lowerCAmelCase_ : Optional[Any] = int(shortest_edge / crop_pct )
            lowerCAmelCase_ : List[str] = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase )
            lowerCAmelCase_ : List[Any] = resize(image=__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=__lowercase , size=(shortest_edge, shortest_edge) , data_format=__lowercase , **__lowercase )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                __lowercase , size=(shortest_edge, shortest_edge) , resample=__lowercase , data_format=__lowercase , **__lowercase )

    # Multiplies pixel values by `scale` (e.g. 1/255).
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> Any:
        return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )

    # Channel-wise (x - mean) / std normalization.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
        return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )

    # Full pipeline: validate inputs, then resize -> rescale -> normalize and
    # pack the result into a BatchFeature.
    def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image:
        lowerCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
        lowerCAmelCase_ : Any = crop_pct if crop_pct is not None else self.crop_pct
        lowerCAmelCase_ : str = resample if resample is not None else self.resample
        lowerCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
        lowerCAmelCase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
        lowerCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean
        lowerCAmelCase_ : int = image_std if image_std is not None else self.image_std
        lowerCAmelCase_ : int = size if size is not None else self.size
        lowerCAmelCase_ : List[str] = get_size_dict(__lowercase , default_to_square=__lowercase )
        lowerCAmelCase_ : Tuple = make_list_of_images(__lowercase )
        if not valid_images(__lowercase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # NOTE(review): operator precedence makes this
        # (do_resize and size is None) or (resample is None); the intent is
        # almost certainly do_resize and (size is None or resample is None).
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
            raise ValueError('''crop_pct must be specified if size < 384.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        lowerCAmelCase_ : Optional[Any] = [to_numpy_array(__lowercase ) for image in images]
        if do_resize:
            lowerCAmelCase_ : Union[str, Any] = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images]
        if do_rescale:
            lowerCAmelCase_ : Any = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
        if do_normalize:
            lowerCAmelCase_ : List[Any] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
        lowerCAmelCase_ : Optional[Any] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
        lowerCAmelCase_ : Dict = {'''pixel_values''': images}
        return BatchFeature(data=__lowercase , tensor_type=__lowercase )
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
_UpperCAmelCase ="""\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
_UpperCAmelCase ="""\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
_UpperCAmelCase ="""
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def lowerCAmelCase ( preds , labels )-> float:
    """Fraction of positions where `preds` equals `labels`.

    Both arguments must support elementwise `==` yielding an object with a
    `.mean()` method (e.g. same-shape numpy arrays).

    BUGFIX: the two parameters previously shared one mangled name, which is
    a SyntaxError (duplicate argument in function definition).
    """
    return float((preds == labels).mean() )
def lowerCAmelCase ( preds , labels )-> Union[str, Any]:
    """Return accuracy and binary F1 for `preds` against `labels`.

    BUGFIX: the two parameters previously shared one mangled name (a
    SyntaxError), and the keyword arguments to the F1 call referenced them
    ambiguously.  NOTE(review): `simple_accuracy` and `fa_score`
    (sklearn.metrics.f1_score, imported at the top of this file) are siblings
    whose obfuscated names should be confirmed.
    """
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def lowerCAmelCase ( en_sentvecs , in_sentvecs )-> float:
    """Precision@10 for cross-lingual sentence retrieval.

    For each English sentence vector, ranks all Indic sentence vectors by
    cosine distance (after mean-centering each side) and scores a hit when
    the correct index (the same row) appears in the top 10.

    BUGFIX: the two parameters previously shared one mangled name (a
    SyntaxError) and the body read names (`en_sentvecs`, `matches`, ...)
    that were never bound; consistent bindings are restored.
    """
    en_sentvecs = np.array(en_sentvecs )
    in_sentvecs = np.array(in_sentvecs )
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs , axis=0 )
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs , axis=0 )
    sim = cdist(en_sentvecs , in_sentvecs , '''cosine''' )
    actual = np.array(range(n ) )
    preds = sim.argsort(axis=1 )[:, :10]
    matches = np.any(preds == actual[:, None] , axis=1 )
    return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class snake_case__( datasets.Metric ):
    """IndicGLUE metric: dispatches on the configuration name to accuracy,
    accuracy+F1 (wiki-ner), or precision@10 (cvit-mkb-clsr)."""

    # Declares the feature schema per config; cvit-mkb-clsr takes float
    # vectors, every other config takes int64 labels.
    def lowercase_ ( self ) -> Tuple:
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
                '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
                '''"wiki-ner"]''' )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''int64''' )
                    if self.config_name != '''cvit-mkb-clsr'''
                    else datasets.Sequence(datasets.Value('''float32''' ) ),
                    '''references''': datasets.Value('''int64''' )
                    if self.config_name != '''cvit-mkb-clsr'''
                    else datasets.Sequence(datasets.Value('''float32''' ) ),
                } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )

    # Computes the metric for one batch of predictions/references.
    # NOTE(review): `precision_at_aa`, `acc_and_fa` and `simple_accuracy`
    # refer to the module-level helpers above, whose defs were renamed to
    # `lowerCAmelCase` by obfuscation — these call targets must be restored.
    def lowercase_ ( self , __lowercase , __lowercase ) -> Union[str, Any]:
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(__lowercase , __lowercase )}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(__lowercase , __lowercase )
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(__lowercase , __lowercase )}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
                '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
                '''"wiki-ner"]''' )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[int] =logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] ={
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
    """Configuration class for GPT-NeoX Japanese models.

    Stores the model hyper-parameters; the defaults correspond to the
    abeja/gpt-neox-japanese-2.7b checkpoint.

    BUGFIX: the original `__init__` repeated `__lowercase` for every
    parameter (a SyntaxError: duplicate argument names) and the body read
    names that were never bound.  Parameter names are restored per the
    documented GPTNeoXJapaneseConfig signature.
    """

    SCREAMING_SNAKE_CASE__ : str = """gpt_neox_japanese"""

    def __init__(
        self ,
        vocab_size=32_000 ,
        hidden_size=2_560 ,
        num_hidden_layers=32 ,
        num_attention_heads=32 ,
        intermediate_multiple_size=4 ,
        hidden_act="gelu" ,
        rotary_pct=1.00 ,
        rotary_emb_base=10_000 ,
        max_position_embeddings=2_048 ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-5 ,
        use_cache=True ,
        bos_token_id=31_996 ,
        eos_token_id=31_999 ,
        attention_dropout=0.1 ,
        hidden_dropout=0.0 ,
        **kwargs ,
    ) -> None:
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
# (value, numeral) pairs in strictly descending value order; the greedy
# int -> Roman converter below relies on this ordering.
# NOTE(review): the functions below reference this table as `ROMAN`; the
# obfuscated name `_UpperCAmelCase` must be reconciled with those call sites.
_UpperCAmelCase : Dict =[
    (1000, """M"""),
    (900, """CM"""),
    (500, """D"""),
    (400, """CD"""),
    (100, """C"""),
    (90, """XC"""),
    (50, """L"""),
    (40, """XL"""),
    (10, """X"""),
    (9, """IX"""),
    (5, """V"""),
    (4, """IV"""),
    (1, """I"""),
]
def lowerCAmelCase ( roman )-> int:
    """Convert a Roman numeral string to its integer value.

    Handles subtractive notation (IV, IX, XL, XC, CD, CM) by looking one
    symbol ahead.  An empty string yields 0.

    BUGFIX: the original bound its accumulators to the mangled name
    `lowerCAmelCase_` while the loop read the never-defined names `roman`,
    `total` and `place` (NameErrors); consistent bindings are restored.
    """
    vals = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000}
    total = 0
    place = 0
    while place < len(roman ):
        # A smaller value before a larger one encodes subtraction.
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def lowerCAmelCase ( number )-> str:
    """Convert a positive integer to a Roman numeral string using the
    descending (value, numeral) table defined above.

    BUGFIX: the original discarded the `divmod` result into a single mangled
    name and then read the undefined names `factor` and `number`; the result
    is now unpacked as (factor, remainder).  The table reference uses the
    module constant (originally named ROMAN).
    """
    result = []
    for arabic, roman in _UpperCAmelCase:
        factor, number = divmod(number , arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
# Run the module doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class snake_case__:
    """Test helper that builds small MaskFormer configs/inputs and checks the
    output shapes of MaskFormerModel and MaskFormerForInstanceSegmentation.

    NOTE(review): obfuscation mangled the parameter/local names throughout —
    `__init__` and other defs repeat `__lowercase` (a SyntaxError), and
    several tuple unpacks assign every element to the same name.  The
    original signatures/bindings need restoring before these helpers run.
    """

    # Records the synthetic-batch dimensions used by the checks below.
    def __init__( self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=1_0 , __lowercase=3 , __lowercase=3_2 * 4 , __lowercase=3_2 * 6 , __lowercase=4 , __lowercase=3_2 , ) -> Union[str, Any]:
        lowerCAmelCase_ : str = parent
        lowerCAmelCase_ : Optional[Any] = batch_size
        lowerCAmelCase_ : List[Any] = is_training
        lowerCAmelCase_ : Optional[Any] = use_auxiliary_loss
        lowerCAmelCase_ : List[Any] = num_queries
        lowerCAmelCase_ : str = num_channels
        lowerCAmelCase_ : Dict = min_size
        lowerCAmelCase_ : List[str] = max_size
        lowerCAmelCase_ : Any = num_labels
        lowerCAmelCase_ : str = mask_feature_size

    # Builds random pixel values/masks and binary mask + class labels.
    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __lowercase )
        lowerCAmelCase_ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase )
        lowerCAmelCase_ : Union[str, Any] = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5
        ).float()
        lowerCAmelCase_ : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long()
        lowerCAmelCase_ : Dict = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    # Tiny Swin-backbone + DETR-decoder MaskFormer config for fast tests.
    def lowercase_ ( self ) -> List[str]:
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    # Same as prepare_config_and_inputs but packed into a kwargs dict.
    def lowercase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = self.prepare_config_and_inputs()
        lowerCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict

    # Asserts hidden-state counts match backbone depths / decoder layers.
    def lowercase_ ( self , __lowercase , __lowercase ) -> Any:
        lowerCAmelCase_ : Optional[int] = output.encoder_hidden_states
        lowerCAmelCase_ : List[Any] = output.pixel_decoder_hidden_states
        lowerCAmelCase_ : Optional[Any] = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowercase ) , config.decoder_config.decoder_layers )

    # Runs the base model and verifies the decoder output shape and the
    # presence of encoder / pixel-decoder hidden states.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> int:
        with torch.no_grad():
            lowerCAmelCase_ : List[Any] = MaskFormerModel(config=__lowercase )
            model.to(__lowercase )
            model.eval()
            lowerCAmelCase_ : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase )
            lowerCAmelCase_ : Optional[int] = model(__lowercase , output_hidden_states=__lowercase )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(__lowercase , __lowercase )

    # Runs the instance-segmentation head with and without labels and checks
    # logits shapes and the loss.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
        lowerCAmelCase_ : Any = MaskFormerForInstanceSegmentation(config=__lowercase )
        model.to(__lowercase )
        model.eval()

        def comm_check_on_output(__lowercase ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            lowerCAmelCase_ : int = model(pixel_values=__lowercase , pixel_mask=__lowercase )
            lowerCAmelCase_ : Any = model(__lowercase )
        comm_check_on_output(__lowercase )
        lowerCAmelCase_ : List[Any] = model(
            pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        comm_check_on_output(__lowercase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """Common (mixin-driven) tests for the MaskFormer model family.

    NOTE(review): throughout this class the mangled assignment targets
    (`lowerCAmelCase_`) no longer match the names read afterwards (`model`,
    `signature`, `arg_names`, `outputs`, `loss`, `encoder_hidden_states`, ...);
    as written those methods raise NameError.  Presumably each assignment
    originally bound the name used on the following lines -- confirm against
    the upstream test suite.
    """

    # Model classes / pipeline mapping exercised by the shared mixin tests.
    SCREAMING_SNAKE_CASE__ : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE__ : Tuple = (
        {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    # Feature switches consumed by the common test mixin (all disabled here).
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : Dict = False
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : List[str] = False

    def lowercase_ ( self ) -> List[Any]:
        # Set up the model tester and the shared config tester.
        lowerCAmelCase_ : Any = MaskFormerModelTester(self )
        lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )

    def lowercase_ ( self ) -> Any:
        # Run the generic configuration round-trip checks.
        self.config_tester.run_common_tests()

    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )

    def lowercase_ ( self ) -> Optional[Any]:
        lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase )

    @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
    def lowercase_ ( self ) -> str:
        pass

    @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass

    @unittest.skip(reason='''MaskFormer is not a generative model''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass

    @unittest.skip(reason='''MaskFormer does not use token embeddings''' )
    def lowercase_ ( self ) -> Union[str, Any]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowercase_ ( self ) -> Dict:
        pass

    def lowercase_ ( self ) -> List[str]:
        # The forward signature of every model class must start with `pixel_values`.
        lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : Tuple = model_class(__lowercase )
            lowerCAmelCase_ : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase_ : str = [*signature.parameters.keys()]
            lowerCAmelCase_ : Tuple = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowercase )

    @slow
    def lowercase_ ( self ) -> Optional[int]:
        # Smoke test loading a published checkpoint.
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            lowerCAmelCase_ : str = MaskFormerModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )

    def lowercase_ ( self ) -> List[Any]:
        # The segmentation head must produce a loss for random targets.
        lowerCAmelCase_ : Tuple = (self.model_tester.min_size,) * 2
        lowerCAmelCase_ : List[Any] = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=__lowercase ),
            '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowercase ),
            '''class_labels''': torch.zeros(2 , 1_0 , device=__lowercase ).long(),
        }
        lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase )
        lowerCAmelCase_ : Dict = model(**__lowercase )
        self.assertTrue(outputs.loss is not None )

    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )

    def lowercase_ ( self ) -> int:
        # Attentions must be returned when requested.
        lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : List[str] = model_class(__lowercase ).to(__lowercase )
            lowerCAmelCase_ : int = model(**__lowercase , output_attentions=__lowercase )
            self.assertTrue(outputs.attentions is not None )

    def lowercase_ ( self ) -> List[str]:
        # Training smoke test: the loss must be backpropagatable.
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase_ : int = self.all_model_classes[1]
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ : Optional[Any] = model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        lowerCAmelCase_ : Optional[Any] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss
        loss.backward()

    def lowercase_ ( self ) -> Optional[int]:
        # Gradients must flow back to every retained intermediate activation.
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase_ : Any = self.all_model_classes[1]
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ : Tuple = True
        lowerCAmelCase_ : Tuple = True
        lowerCAmelCase_ : Any = model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        lowerCAmelCase_ : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        lowerCAmelCase_ : str = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        lowerCAmelCase_ : Union[str, Any] = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=__lowercase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Absolute tolerance used by the tensor comparisons in the integration tests below.
_UpperCAmelCase : Dict =1E-4
def lowerCAmelCase ( )-> Any:
    """Load the standard COCO test image used by the integration tests.

    Fix: the opened image was bound to a throwaway name while `return` read
    the undefined name `image`, raising NameError.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_vision
@slow
class snake_case__( unittest.TestCase ):
    """Slow integration tests running published MaskFormer checkpoints on a real image.

    NOTE(review): as elsewhere in this file, mangled `lowerCAmelCase_`
    assignment targets no longer match the names read later (`model`,
    `inputs`, `inputs_shape`, `outputs`, `masks_queries_logits`, ...); as
    written these tests raise NameError -- confirm against the upstream suite.
    """

    @cached_property
    def lowercase_ ( self ) -> Union[str, Any]:
        # Image processor matching the swin-small COCO checkpoint (None without vision deps).
        return (
            MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            if is_vision_available()
            else None
        )

    def lowercase_ ( self ) -> Any:
        # Backbone forward pass: compare hidden-state slices to recorded reference values.
        lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase )
        lowerCAmelCase_ : Dict = self.default_image_processor
        lowerCAmelCase_ : int = prepare_img()
        lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : List[str] = model(**__lowercase )
        lowerCAmelCase_ : Union[str, Any] = torch.tensor(
            [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        lowerCAmelCase_ : List[Any] = torch.tensor(
            [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        lowerCAmelCase_ : int = torch.tensor(
            [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) )

    def lowercase_ ( self ) -> Dict:
        # Instance-segmentation head on the swin-small COCO checkpoint.
        lowerCAmelCase_ : Optional[Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : Tuple = self.default_image_processor
        lowerCAmelCase_ : Optional[Any] = prepare_img()
        lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : Dict = model(**__lowercase )
        # masks_queries_logits
        lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCAmelCase_ : Tuple = [
            [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
            [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
            [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
        ]
        lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        # class_queries_logits
        lowerCAmelCase_ : List[Any] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ : Dict = torch.tensor(
            [
                [1.6_512e00, -5.2_572e00, -3.3_519e00],
                [3.6_169e-02, -5.9_025e00, -2.9_313e00],
                [1.0_766e-04, -7.7_630e00, -5.1_263e00],
            ] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )

    def lowercase_ ( self ) -> Optional[Any]:
        # Instance-segmentation head on the resnet101 COCO-stuff checkpoint.
        lowerCAmelCase_ : str = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : int = self.default_image_processor
        lowerCAmelCase_ : Optional[Any] = prepare_img()
        lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : str = model(**__lowercase )
        # masks_queries_logits
        lowerCAmelCase_ : List[str] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
        lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        # class_queries_logits
        lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ : int = torch.tensor(
            [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )

    def lowercase_ ( self ) -> Optional[Any]:
        # End-to-end check that inputs with segmentation maps yield a loss.
        lowerCAmelCase_ : Dict = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : str = self.default_image_processor
        lowerCAmelCase_ : Union[str, Any] = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
        lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase )
        lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']]
        lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']]
        with torch.no_grad():
            lowerCAmelCase_ : str = model(**__lowercase )
        self.assertTrue(outputs.loss is not None )
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class snake_case__( UpperCAmelCase__ ):
    """Base type for audio filters: `lowercase_` processes one float sample.

    NOTE(review): the base was mangled to `UpperCAmelCase__`; given the
    `typing.Protocol` import above, this was presumably a Protocol -- confirm.
    """

    def lowercase_ ( self , __lowercase ) -> float:
        # Stub implementation: returns silence for every input sample.
        return 0.0
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> tuple[int | float, int | float]:
lowerCAmelCase_ : int = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
lowerCAmelCase_ : str = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def lowerCAmelCase ( filter_type , samplerate )-> None:
    """Plot the frequency (gain) response of `filter_type` at `samplerate`.

    The filter is driven with a unit impulse, the impulse response is
    zero-padded to `samplerate` samples, and the FFT magnitude is shown in dB
    on a log-frequency axis up to the Nyquist frequency.

    Fixes: duplicate parameter names (SyntaxError), the undefined
    comprehension variable, the undefined `outputs`/gain-array references, and
    the nonexistent `np.logaa` (restored to `np.log10`).
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel('''Gain (dB)''' )
    plt.plot(fft_db )
    plt.show()
def lowerCAmelCase ( filter_type , samplerate )-> None:
    """Plot the phase response of `filter_type` at `samplerate`.

    The filter is driven with a unit impulse, the impulse response is
    zero-padded to `samplerate` samples, and the unwrapped FFT phase is shown
    on a log-frequency axis.

    Fixes: duplicate parameter names (SyntaxError), the undefined
    comprehension variable, and the undefined `outputs`/phase-array references.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel('''Phase shift (Radians)''' )
    plt.plot(np.unwrap(phase , -2 * pi ) )
    plt.show()
| 708 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase__ )
class snake_case__( UpperCAmelCase__ ):
    """Task template for automatic speech recognition: an input ``audio``
    column and a target string ``transcription`` column."""

    # Task name, canonical input/label schemas, and the dataset column names.
    SCREAMING_SNAKE_CASE__ : str = field(default="""automatic-speech-recognition""", metadata={"""include_in_asdict_even_if_is_default""": True} )
    SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""audio""": Audio()} )
    SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""transcription""": Value("""string""" )} )
    SCREAMING_SNAKE_CASE__ : str = "audio"
    SCREAMING_SNAKE_CASE__ : str = "transcription"

    def lowercase_ ( self , __lowercase ) -> int:
        """Return a copy of the template whose input schema uses the dataset's
        own Audio feature for `self.audio_column`.

        NOTE(review): the body reads `features` although the parameter is named
        `__lowercase`, and the mangled assignments never bind `task_template` /
        `input_schema` read below -- as written this raises NameError.  Confirm
        the intended bindings upstream.
        """
        if self.audio_column not in features:
            raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
        if not isinstance(features[self.audio_column] , __lowercase ):
            raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
        lowerCAmelCase_ : List[str] = copy.deepcopy(self )
        lowerCAmelCase_ : Optional[Any] = self.input_schema.copy()
        lowerCAmelCase_ : Optional[Any] = features[self.audio_column]
        lowerCAmelCase_ : List[str] = input_schema
        return task_template

    @property
    def lowercase_ ( self ) -> Dict[str, str]:
        # Map the dataset's column names onto the canonical task column names.
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : Optional[Any] =logging.get_logger(__name__)
# Map from XLM checkpoint name to the URL of its hosted config.json.
_UpperCAmelCase : List[str] ={
    """xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
    """xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
    """xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
    """xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
    """xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
    """xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
    """xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
    """xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
    """xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
    """xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
    """Configuration class for the XLM model (model_type "xlm").

    NOTE(review): the `__init__` signature repeats the mangled parameter name
    `__lowercase` (a SyntaxError as written), the body reads the original
    parameter names (`vocab_size`, `emb_dim`, ...), and every assignment binds
    the throwaway local `lowerCAmelCase_` instead of (presumably) the matching
    `self.*` attribute.  Confirm the intended signature/bindings upstream.
    """

    SCREAMING_SNAKE_CASE__ : Optional[int] = """xlm"""
    # Canonical attribute names mapped onto XLM's historical config fields.
    SCREAMING_SNAKE_CASE__ : List[str] = {
        """hidden_size""": """emb_dim""",
        """num_attention_heads""": """n_heads""",
        """num_hidden_layers""": """n_layers""",
        """n_words""": """vocab_size""", # For backward compatibility
    }

    def __init__( self , __lowercase=3_0_1_4_5 , __lowercase=2_0_4_8 , __lowercase=1_2 , __lowercase=1_6 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=True , __lowercase=False , __lowercase=False , __lowercase=False , __lowercase=1 , __lowercase=True , __lowercase=5_1_2 , __lowercase=2_0_4_8**-0.5 , __lowercase=1e-12 , __lowercase=0.02 , __lowercase=0 , __lowercase=1 , __lowercase=2 , __lowercase=3 , __lowercase=5 , __lowercase=True , __lowercase="first" , __lowercase=True , __lowercase=None , __lowercase=True , __lowercase=0.1 , __lowercase=5 , __lowercase=5 , __lowercase=0 , __lowercase=0 , __lowercase=2 , __lowercase=0 , **__lowercase , ) -> Any:
        lowerCAmelCase_ : List[Any] = vocab_size
        lowerCAmelCase_ : List[Any] = emb_dim
        lowerCAmelCase_ : Dict = n_layers
        lowerCAmelCase_ : int = n_heads
        lowerCAmelCase_ : str = dropout
        lowerCAmelCase_ : Optional[int] = attention_dropout
        lowerCAmelCase_ : str = gelu_activation
        lowerCAmelCase_ : Optional[Any] = sinusoidal_embeddings
        lowerCAmelCase_ : Optional[Any] = causal
        lowerCAmelCase_ : List[Any] = asm
        lowerCAmelCase_ : Dict = n_langs
        lowerCAmelCase_ : Dict = use_lang_emb
        lowerCAmelCase_ : int = layer_norm_eps
        lowerCAmelCase_ : Optional[Any] = bos_index
        lowerCAmelCase_ : str = eos_index
        lowerCAmelCase_ : str = pad_index
        lowerCAmelCase_ : Optional[int] = unk_index
        lowerCAmelCase_ : str = mask_index
        lowerCAmelCase_ : Tuple = is_encoder
        lowerCAmelCase_ : List[str] = max_position_embeddings
        lowerCAmelCase_ : List[Any] = embed_init_std
        lowerCAmelCase_ : Any = init_std
        lowerCAmelCase_ : List[Any] = summary_type
        lowerCAmelCase_ : Optional[Any] = summary_use_proj
        lowerCAmelCase_ : Tuple = summary_activation
        lowerCAmelCase_ : int = summary_proj_to_labels
        lowerCAmelCase_ : Union[str, Any] = summary_first_dropout
        lowerCAmelCase_ : Dict = start_n_top
        lowerCAmelCase_ : str = end_n_top
        lowerCAmelCase_ : Optional[Any] = mask_token_id
        lowerCAmelCase_ : int = lang_id
        # Backward compatibility: accept the legacy `n_words` kwarg.
        if "n_words" in kwargs:
            lowerCAmelCase_ : Any = kwargs['''n_words''']
        super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , **__lowercase )
class snake_case__( UpperCAmelCase__ ):
    """ONNX export configuration: declares the dynamic axes of the model inputs.

    NOTE(review): both branch assignments were mangled to `lowerCAmelCase_`
    while the return statement reads `dynamic_axis`; as written this raises
    NameError -- presumably both branches bound `dynamic_axis`.  Confirm.
    """

    @property
    def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice adds a `choice` axis between batch and sequence.
        if self.task == "multiple-choice":
            lowerCAmelCase_ : Union[str, Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            lowerCAmelCase_ : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
# Kwarg-name sets describing diffusion-pipeline call signatures, in
# (full kwarg set, required subset) pairs for each pipeline family.
# NOTE(review): the original distinct constant names (TEXT_TO_IMAGE_PARAMS,
# IMAGE_VARIATION_PARAMS, ...) were all mangled to `_UpperCAmelCase`, so each
# assignment below overwrites the previous one -- confirm intended names.
_UpperCAmelCase : int =frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : List[Any] =frozenset(["""prompt""", """negative_prompt"""])
_UpperCAmelCase : Dict =frozenset([])
_UpperCAmelCase : int =frozenset(["""image"""])
_UpperCAmelCase : Tuple =frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : int =frozenset(["""image"""])
_UpperCAmelCase : str =frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : int =frozenset(["""prompt""", """image""", """negative_prompt"""])
_UpperCAmelCase : Optional[int] =frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : Optional[int] =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_UpperCAmelCase : Optional[Any] =frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Optional[Any] =frozenset(["""image""", """mask_image"""])
_UpperCAmelCase : Union[str, Any] =frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""example_image""", """image""", """mask_image"""])
# Class-conditioned / unconditional generation signatures.
_UpperCAmelCase : Any =frozenset(["""class_labels"""])
_UpperCAmelCase : List[Any] =frozenset(["""class_labels"""])
_UpperCAmelCase : int =frozenset(["""batch_size"""])
_UpperCAmelCase : str =frozenset([])
_UpperCAmelCase : str =frozenset(["""batch_size"""])
_UpperCAmelCase : Optional[Any] =frozenset([])
# Text-to-audio and token-conditioned signatures.
_UpperCAmelCase : Tuple =frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""prompt""", """negative_prompt"""])
_UpperCAmelCase : List[str] =frozenset(["""input_tokens"""])
_UpperCAmelCase : Optional[Any] =frozenset(["""input_tokens"""])
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> str:
    """Download ~`num_class_images` regularization images matching a prompt via
    the LAION knn service and record their captions/urls/paths under
    ``{class_data_dir}/``.

    NOTE(review): the three parameters share one mangled name (a SyntaxError)
    and the body reads `num_class_images`, `class_data_dir`, `class_images`,
    `num_images`, `count`, `total`, `pbar`, `img`, `images`, which the mangled
    assignments no longer bind -- this function cannot run as written.
    Presumably the parameters were (class_prompt, class_data_dir,
    num_class_images); confirm upstream before use.
    """
    # Over-query by 1.5x so enough results survive the download failures below.
    lowerCAmelCase_ : List[str] = 1.5
    lowerCAmelCase_ : List[Any] = int(factor * num_class_images )
    lowerCAmelCase_ : Union[str, Any] = ClipClient(
        url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=lowerCAmelCase_ , aesthetic_weight=0.1 )
    os.makedirs(f"""{class_data_dir}/images""" , exist_ok=lowerCAmelCase_ )
    # Skip the download entirely if enough images are already on disk.
    if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
        return
    # Keep enlarging the query until enough candidates come back (capped at 1e4).
    while True:
        lowerCAmelCase_ : Any = client.query(text=lowerCAmelCase_ )
        if len(lowerCAmelCase_ ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            lowerCAmelCase_ : str = int(factor * num_images )
            lowerCAmelCase_ : Tuple = ClipClient(
                url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=lowerCAmelCase_ , aesthetic_weight=0.1 , )
    lowerCAmelCase_ : List[str] = 0
    lowerCAmelCase_ : Dict = 0
    lowerCAmelCase_ : Tuple = tqdm(desc='''downloading real regularization images''' , total=lowerCAmelCase_ )
    # Record caption/url/local path for every successfully downloaded image.
    with open(f"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(f"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open(
        f"""{class_data_dir}/images.txt""" , '''w''' ) as fa:
        while total < num_class_images:
            lowerCAmelCase_ : Optional[Any] = class_images[count]
            count += 1
            try:
                lowerCAmelCase_ : Optional[Any] = requests.get(images['''url'''] )
                if img.status_code == 200:
                    lowerCAmelCase_ : Union[str, Any] = Image.open(BytesIO(img.content ) )
                    with open(f"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
                        f.write(img.content )
                    fa.write(images['''caption'''] + '''\n''' )
                    fa.write(images['''url'''] + '''\n''' )
                    fa.write(f"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            # Best-effort: skip any candidate whose download or decode fails.
            except Exception:
                continue
    return
def lowerCAmelCase ( )-> Union[str, Any]:
lowerCAmelCase_ : List[Any] = argparse.ArgumentParser('''''' , add_help=lowerCAmelCase_ )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=lowerCAmelCase_ , type=lowerCAmelCase_ )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=lowerCAmelCase_ , type=lowerCAmelCase_ )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=lowerCAmelCase_ )
return parser.parse_args()
if __name__ == "__main__":
    # NOTE(review): `parse_args` / `retrieve` / `args` are not defined under
    # these names in this file (both helpers above are named `lowerCAmelCase`,
    # and the parsed namespace is bound to `_UpperCAmelCase`), so this entry
    # point raises NameError as written -- confirm the intended names.
    _UpperCAmelCase : List[str] =parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
def lowerCAmelCase ( lowerCAmelCase_ = 1_000_000 )-> int:
    """Project Euler 14: return the starting number below `lowerCAmelCase_`
    that produces the longest Collatz sequence (3n+1 problem).

    Chain lengths of previously seen starting numbers are memoised, so a
    chain stops as soon as it reaches any already-counted start.

    Fix: the locals (`largest_number`, `pre_counter`, `counters`, `number`)
    had been collapsed onto the parameter name, which both shadowed the limit
    (making `range(2, ...)` receive a dict) and left later references
    undefined; distinct names restore the algorithm.
    """
    largest_number = 1   # best starting number found so far
    pre_counter = 1      # chain length of `largest_number`
    counters = {1: 1}    # memoised chain lengths keyed by starting number
    for inputa in range(2 , lowerCAmelCase_ ):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
    # Fix: the solver above is named `lowerCAmelCase`, not `solution`; the
    # original call raised NameError.
    print(lowerCAmelCase(int(input().strip())))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import scaffolding for the Encodec subpackage: only type-checkers see
# the real imports; at runtime the module is replaced by a _LazyModule.
# NOTE(review): the mangled names break the usual pattern -- the structure
# dict and the torch-only symbol list are both bound to `_UpperCAmelCase`,
# while `_LazyModule` is passed the undefined name `_import_structure`; as
# written importing this module raises NameError.  Confirm the intended
# `_import_structure` bindings upstream.
_UpperCAmelCase : Tuple ={
    """configuration_encodec""": [
        """ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """EncodecConfig""",
    ],
    """feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}
# Modeling symbols are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : List[str] =[
        """ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """EncodecModel""",
        """EncodecPreTrainedModel""",
    ]
if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )
else:
    import sys

    _UpperCAmelCase : List[str] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : str =logging.get_logger(__name__)  # module-level logger
class snake_case__( UpperCAmelCase__ ):
    """Composite configuration for an encoder-decoder model: wraps one encoder
    config and one decoder config.

    NOTE(review): the mangled assignments bind the `AutoConfig.for_model(...)`
    results and the `to_dict` intermediates to the throwaway local
    `lowerCAmelCase_` instead of (presumably) `self.encoder`, `self.decoder`,
    `output["""encoder"""]`, ..., and `__init__` reads `kwargs` although its
    parameter is `**__lowercase` -- as written these methods raise
    NameError/AttributeError.  Confirm the intended bindings upstream.
    """

    SCREAMING_SNAKE_CASE__ : Optional[int] = """encoder-decoder"""
    SCREAMING_SNAKE_CASE__ : str = True

    def __init__( self , **__lowercase ) -> Union[str, Any]:
        super().__init__(**__lowercase )
        # Both sub-configs must be supplied.  NOTE: `assert` is stripped under
        # python -O, so this is validation by convention only.
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        lowerCAmelCase_ : str = kwargs.pop('''encoder''' )
        lowerCAmelCase_ : int = encoder_config.pop('''model_type''' )
        lowerCAmelCase_ : Optional[Any] = kwargs.pop('''decoder''' )
        lowerCAmelCase_ : Optional[Any] = decoder_config.pop('''model_type''' )
        # Imported lazily to avoid a circular import with the auto mapping.
        from ..auto.configuration_auto import AutoConfig

        lowerCAmelCase_ : Union[str, Any] = AutoConfig.for_model(__lowercase , **__lowercase )
        lowerCAmelCase_ : List[str] = AutoConfig.for_model(__lowercase , **__lowercase )
        lowerCAmelCase_ : Any = True

    @classmethod
    def lowercase_ ( cls , __lowercase , __lowercase , **__lowercase ) -> PretrainedConfig:
        """Build a composite config from separate encoder/decoder configs,
        marking the decoder as a cross-attending decoder."""
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        lowerCAmelCase_ : int = True
        lowerCAmelCase_ : List[Any] = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowercase )

    def lowercase_ ( self ) -> Any:
        """Serialize to a plain dict, expanding the nested sub-configs."""
        lowerCAmelCase_ : Optional[Any] = copy.deepcopy(self.__dict__ )
        lowerCAmelCase_ : List[str] = self.encoder.to_dict()
        lowerCAmelCase_ : Dict = self.decoder.to_dict()
        lowerCAmelCase_ : Optional[Any] = self.__class__.model_type
        return output
'''simple docstring'''
def lowerCAmelCase ( )-> int:
    """Project Euler 40: product of the Champernowne-constant digits
    d1 * d10 * d100 * d1000 * d10000 * d100000 * d1000000 (1-indexed).

    Fixes: the list/counter locals had been collapsed onto one mangled name
    (leaving `constant` and `i` undefined, a NameError), and the loop
    condition counted appended *numbers* rather than digits, building ~5.9M
    digits where 1M suffice.
    """
    parts = []
    digit_count = 0
    i = 1
    # Concatenate "1", "2", "3", ... until at least one million digits exist.
    while digit_count < 1_000_000:
        s = str(i )
        parts.append(s )
        digit_count += len(s )
        i += 1
    constant = ''''''.join(parts )
    return (
        int(constant[0] )
        * int(constant[9] )
        * int(constant[99] )
        * int(constant[999] )
        * int(constant[9_999] )
        * int(constant[99_999] )
        * int(constant[999_999] )
    )
if __name__ == "__main__":
    # Fix: the function above is named `lowerCAmelCase`, not `solution`; the
    # original call raised NameError.
    print(lowerCAmelCase())
from __future__ import annotations
from math import pi
def lowerCAmelCase ( inductance , frequency , reactance )-> dict[str, float]:
    """Solve the inductive-reactance relation X_L = 2*pi*f*L for whichever of
    the three quantities is given as 0.

    Exactly one argument must be zero; that quantity is computed from the
    other two and returned as a single-entry dict keyed by its name.

    Raises ValueError if zero or several arguments are 0, or any is negative.

    Fix: all three parameters were named `lowerCAmelCase_` (a SyntaxError);
    restored to the names the body already uses.
    """
    if (inductance, frequency, reactance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if inductance < 0:
        raise ValueError('''Inductance cannot be negative''' )
    if frequency < 0:
        raise ValueError('''Frequency cannot be negative''' )
    if reactance < 0:
        raise ValueError('''Inductive reactance cannot be negative''' )
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
    # Run any doctests defined in this module.
    import doctest

    doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[Any] =logging.get_logger(__name__)
# Map from LiLT checkpoint name to the URL of its hosted config.json.
_UpperCAmelCase : Tuple ={
    """SCUT-DLVCLab/lilt-roberta-en-base""": (
        """https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
    ),
}
class snake_case__( UpperCAmelCase__ ):
    """Configuration class for the LiLT model (model_type "lilt").

    NOTE(review): the `__init__` signature repeats the mangled parameter name
    `__lowercase` (a SyntaxError as written), the body reads the original
    parameter names (`vocab_size`, `hidden_size`, ...), and every assignment
    binds the throwaway local `lowerCAmelCase_` instead of (presumably) the
    matching `self.*` attribute.  Confirm the intended signature/bindings
    upstream.
    """

    SCREAMING_SNAKE_CASE__ : Dict = """lilt"""

    def __init__( self , __lowercase=3_0_5_2_2 , __lowercase=7_6_8 , __lowercase=1_2 , __lowercase=1_2 , __lowercase=3_0_7_2 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_1_2 , __lowercase=2 , __lowercase=0.02 , __lowercase=1e-12 , __lowercase=0 , __lowercase="absolute" , __lowercase=None , __lowercase=4 , __lowercase=1_0_2_4 , **__lowercase , ) -> Optional[Any]:
        super().__init__(pad_token_id=__lowercase , **__lowercase )
        lowerCAmelCase_ : Optional[Any] = vocab_size
        lowerCAmelCase_ : Any = hidden_size
        lowerCAmelCase_ : int = num_hidden_layers
        lowerCAmelCase_ : Union[str, Any] = num_attention_heads
        lowerCAmelCase_ : Tuple = hidden_act
        lowerCAmelCase_ : Any = intermediate_size
        lowerCAmelCase_ : List[str] = hidden_dropout_prob
        lowerCAmelCase_ : Tuple = attention_probs_dropout_prob
        lowerCAmelCase_ : Optional[Any] = max_position_embeddings
        lowerCAmelCase_ : Optional[int] = type_vocab_size
        lowerCAmelCase_ : Union[str, Any] = initializer_range
        lowerCAmelCase_ : Optional[Any] = layer_norm_eps
        lowerCAmelCase_ : Optional[int] = position_embedding_type
        lowerCAmelCase_ : int = classifier_dropout
        lowerCAmelCase_ : List[Any] = channel_shrink_ratio
        lowerCAmelCase_ : Optional[Any] = max_ad_position_embeddings
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
_UpperCAmelCase : Tuple =logging.get_logger(__name__)  # module-level logger
class snake_case__( UpperCAmelCase__ ):
    """String names of the supported learning-rate schedule types.

    NOTE(review): the base class was mangled to `UpperCAmelCase__`; given the
    string-valued members this was presumably an enum-style base -- confirm.
    """

    SCREAMING_SNAKE_CASE__ : List[str] = """linear"""
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = """cosine"""
    SCREAMING_SNAKE_CASE__ : Dict = """cosine_with_restarts"""
    SCREAMING_SNAKE_CASE__ : List[str] = """polynomial"""
    SCREAMING_SNAKE_CASE__ : Dict = """constant"""
    SCREAMING_SNAKE_CASE__ : List[str] = """constant_with_warmup"""
    SCREAMING_SNAKE_CASE__ : str = """piecewise_constant"""
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ = -1 )-> Tuple:
return LambdaLR(lowerCAmelCase_ , lambda lowerCAmelCase_ : 1 , last_epoch=lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = -1 )-> List[Any]:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1.0 , lowerCAmelCase_ ) )
return 1.0
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = -1 )-> int:
lowerCAmelCase_ : Optional[int] = {}
lowerCAmelCase_ : Union[str, Any] = step_rules.split(''',''' )
for rule_str in rule_list[:-1]:
lowerCAmelCase_ , lowerCAmelCase_ : str = rule_str.split(''':''' )
lowerCAmelCase_ : int = int(lowerCAmelCase_ )
lowerCAmelCase_ : str = float(lowerCAmelCase_ )
lowerCAmelCase_ : List[Any] = value
lowerCAmelCase_ : int = float(rule_list[-1] )
def create_rules_function(lowerCAmelCase_ , lowerCAmelCase_ ):
def rule_func(lowerCAmelCase_ ) -> float:
lowerCAmelCase_ : Tuple = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowerCAmelCase_ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
lowerCAmelCase_ : Tuple = create_rules_function(lowerCAmelCase_ , lowerCAmelCase_ )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=-1 )-> Optional[int]:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0.5 , lowerCAmelCase_ = -1 )-> List[Any]:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
lowerCAmelCase_ : Tuple = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCAmelCase_ ) * 2.0 * progress )) )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = -1 )-> Dict:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
lowerCAmelCase_ : List[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCAmelCase_ ) * progress) % 1.0) )) )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1e-7 , lowerCAmelCase_=1.0 , lowerCAmelCase_=-1 )-> Any:
lowerCAmelCase_ : Dict = optimizer.defaults['''lr''']
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowerCAmelCase_ : List[Any] = lr_init - lr_end
lowerCAmelCase_ : Optional[Any] = num_training_steps - num_warmup_steps
lowerCAmelCase_ : Any = 1 - (current_step - num_warmup_steps) / decay_steps
lowerCAmelCase_ : List[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] ={
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1.0 , lowerCAmelCase_ = -1 , )-> Optional[int]:
lowerCAmelCase_ : Union[str, Any] = SchedulerType(lowerCAmelCase_ )
lowerCAmelCase_ : Optional[int] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCAmelCase_ , step_rules=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , num_cycles=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , power=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ , )
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ ) | 619 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
_UpperCAmelCase : Dict =None
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
_UpperCAmelCase : Any ={"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Any ={
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
_UpperCAmelCase : Dict ={
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
_UpperCAmelCase : Tuple ="""▁"""
# Segments (not really needed)
_UpperCAmelCase : str =0
_UpperCAmelCase : List[str] =1
_UpperCAmelCase : int =2
_UpperCAmelCase : Any =3
_UpperCAmelCase : List[Any] =4
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Any = """left"""
SCREAMING_SNAKE_CASE__ : List[Any] = XLNetTokenizer
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<sep>" , __lowercase="<pad>" , __lowercase="<cls>" , __lowercase="<mask>" , __lowercase=["<eop>", "<eod>"] , **__lowercase , ) -> List[Any]:
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ : Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
super().__init__(
vocab_file=__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , **__lowercase , )
lowerCAmelCase_ : List[Any] = 3
lowerCAmelCase_ : Dict = do_lower_case
lowerCAmelCase_ : Dict = remove_space
lowerCAmelCase_ : List[str] = keep_accents
lowerCAmelCase_ : List[str] = vocab_file
lowerCAmelCase_ : str = False if not self.vocab_file else True
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : Tuple = [self.sep_token_id]
lowerCAmelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
lowerCAmelCase_ : List[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase_ : str = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
copyfile(self.vocab_file , __lowercase )
return (out_vocab_file,) | 714 |
from __future__ import annotations
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )-> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 | 0 |
def lowerCAmelCase ( lowerCAmelCase_ )-> list[list]:
lowerCAmelCase_ : Optional[int] = current_set.copy()
for row_index, row in enumerate(lowerCAmelCase_ ):
lowerCAmelCase_ : Dict = row[0]
for column_index, column in enumerate(lowerCAmelCase_ ):
if magnitude == 0:
lowerCAmelCase_ : List[Any] = column
continue
lowerCAmelCase_ : Union[str, Any] = column / magnitude
# Subtract to cancel term
lowerCAmelCase_ : Tuple = current_set[0]
lowerCAmelCase_ : str = [first_row]
lowerCAmelCase_ : Union[str, Any] = current_set[1::]
for row in current_set:
lowerCAmelCase_ : Tuple = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(lowerCAmelCase_ )
continue
for column_index in range(len(lowerCAmelCase_ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCAmelCase_ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
lowerCAmelCase_ : Union[str, Any] = final_set[0]
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : int = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
lowerCAmelCase_ : List[str] = simplify(lowerCAmelCase_ )
for i in range(len(lowerCAmelCase_ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , lowerCAmelCase_ )
lowerCAmelCase_ : Optional[Any] = resultant
return final_set
def lowerCAmelCase ( lowerCAmelCase_ )-> list:
if len(lowerCAmelCase_ ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
lowerCAmelCase_ : Any = len(lowerCAmelCase_ ) + 1
if any(len(lowerCAmelCase_ ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(lowerCAmelCase_ , (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(lowerCAmelCase_ ) == 1:
return [equations[0][-1] / equations[0][0]]
lowerCAmelCase_ : Optional[Any] = equations.copy()
if any(0 in row for row in data_set ):
lowerCAmelCase_ : int = data_set.copy()
lowerCAmelCase_ : str = []
for row_index, row in enumerate(lowerCAmelCase_ ):
if 0 not in row:
lowerCAmelCase_ : Any = data_set.pop(lowerCAmelCase_ )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0 , lowerCAmelCase_ )
lowerCAmelCase_ : Any = data_set.copy()
lowerCAmelCase_ : str = simplify(lowerCAmelCase_ )
lowerCAmelCase_ : Any = simplified[::-1]
lowerCAmelCase_ : list = []
for row in simplified:
lowerCAmelCase_ : Dict = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
lowerCAmelCase_ : str = row.copy()[: len(lowerCAmelCase_ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCAmelCase_ ) == 0:
solutions.append(0 )
continue
lowerCAmelCase_ : List[str] = temp_row[1::]
lowerCAmelCase_ : int = temp_row[::-1]
for column_index, column in enumerate(lowerCAmelCase_ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCAmelCase_ )
lowerCAmelCase_ : Optional[int] = []
for item in solutions:
final.append(float(round(lowerCAmelCase_ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase : List[Any] =[
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]])) | 715 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_UpperCAmelCase : Any ="""src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_UpperCAmelCase : Optional[Any] =direct_transformers_import(PATH_TO_TRANSFORMERS)
_UpperCAmelCase : List[str] =transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_UpperCAmelCase : Dict =re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
_UpperCAmelCase : Any ={
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
lowerCAmelCase_ : Any = None
# source code of `config_class`
lowerCAmelCase_ : Optional[int] = inspect.getsource(lowerCAmelCase_ )
lowerCAmelCase_ : str = _re_checkpoint.findall(lowerCAmelCase_ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
lowerCAmelCase_ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowerCAmelCase_ : Tuple = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
lowerCAmelCase_ : List[str] = ckpt_name
break
return checkpoint
def lowerCAmelCase ( )-> Optional[Any]:
lowerCAmelCase_ : Tuple = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowerCAmelCase_ : int = get_checkpoint_from_config_class(lowerCAmelCase_ )
lowerCAmelCase_ : Tuple = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
lowerCAmelCase_ : List[Any] = '''\n'''.join(sorted(lowerCAmelCase_ ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints() | 619 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
_UpperCAmelCase : Dict =logging.get_logger(__name__)
_UpperCAmelCase : Optional[int] ={
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = """layoutlmv3"""
def __init__( self , __lowercase=5_0_2_6_5 , __lowercase=7_6_8 , __lowercase=1_2 , __lowercase=1_2 , __lowercase=3_0_7_2 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_1_2 , __lowercase=2 , __lowercase=0.02 , __lowercase=1e-5 , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase=1_0_2_4 , __lowercase=1_2_8 , __lowercase=1_2_8 , __lowercase=True , __lowercase=3_2 , __lowercase=1_2_8 , __lowercase=6_4 , __lowercase=2_5_6 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=2_2_4 , __lowercase=3 , __lowercase=1_6 , __lowercase=None , **__lowercase , ) -> Dict:
super().__init__(
vocab_size=__lowercase , hidden_size=__lowercase , num_hidden_layers=__lowercase , num_attention_heads=__lowercase , intermediate_size=__lowercase , hidden_act=__lowercase , hidden_dropout_prob=__lowercase , attention_probs_dropout_prob=__lowercase , max_position_embeddings=__lowercase , type_vocab_size=__lowercase , initializer_range=__lowercase , layer_norm_eps=__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase , )
lowerCAmelCase_ : Tuple = max_ad_position_embeddings
lowerCAmelCase_ : List[str] = coordinate_size
lowerCAmelCase_ : Optional[Any] = shape_size
lowerCAmelCase_ : Optional[Any] = has_relative_attention_bias
lowerCAmelCase_ : List[str] = rel_pos_bins
lowerCAmelCase_ : str = max_rel_pos
lowerCAmelCase_ : Optional[Any] = has_spatial_attention_bias
lowerCAmelCase_ : Optional[int] = rel_ad_pos_bins
lowerCAmelCase_ : List[str] = max_rel_ad_pos
lowerCAmelCase_ : List[Any] = text_embed
lowerCAmelCase_ : Tuple = visual_embed
lowerCAmelCase_ : List[str] = input_size
lowerCAmelCase_ : int = num_channels
lowerCAmelCase_ : Optional[int] = patch_size
lowerCAmelCase_ : Any = classifier_dropout
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = version.parse("""1.12""" )
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def lowercase_ ( self ) -> float:
return 1e-5
@property
def lowercase_ ( self ) -> int:
return 1_2
def lowercase_ ( self , __lowercase , __lowercase = -1 , __lowercase = -1 , __lowercase = False , __lowercase = None , __lowercase = 3 , __lowercase = 4_0 , __lowercase = 4_0 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , '''apply_ocr''' , __lowercase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase_ : Any = compute_effective_axis_dimension(
__lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase_ : Optional[Any] = processor.tokenizer.num_special_tokens_to_add(__lowercase )
lowerCAmelCase_ : Union[str, Any] = compute_effective_axis_dimension(
__lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowercase )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase_ : Any = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
lowerCAmelCase_ : List[Any] = [[[4_8, 8_4, 7_3, 1_2_8]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
lowerCAmelCase_ : Optional[Any] = self._generate_dummy_images(__lowercase , __lowercase , __lowercase , __lowercase )
lowerCAmelCase_ : str = dict(
processor(
__lowercase , text=__lowercase , boxes=__lowercase , return_tensors=__lowercase , ) )
return inputs
| 716 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase = True , __lowercase = False ) -> Tuple:
lowerCAmelCase_ : Optional[int] = scheduler
lowerCAmelCase_ : Dict = optimizers if isinstance(__lowercase , (list, tuple) ) else [optimizers]
lowerCAmelCase_ : str = split_batches
lowerCAmelCase_ : Any = step_with_optimizer
lowerCAmelCase_ : Optional[Any] = GradientState()
def lowercase_ ( self , *__lowercase , **__lowercase ) -> Any:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*__lowercase , **__lowercase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*__lowercase , **__lowercase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
lowerCAmelCase_ : Optional[Any] = AcceleratorState().num_processes
for _ in range(__lowercase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*__lowercase , **__lowercase )
else:
self.scheduler.step(*__lowercase , **__lowercase )
def lowercase_ ( self ) -> Union[str, Any]:
return self.scheduler.get_last_lr()
def lowercase_ ( self ) -> List[str]:
return self.scheduler.state_dict()
def lowercase_ ( self , __lowercase ) -> int:
self.scheduler.load_state_dict(__lowercase )
def lowercase_ ( self ) -> Tuple:
return self.scheduler.get_lr()
def lowercase_ ( self , *__lowercase , **__lowercase ) -> int:
return self.scheduler.print_lr(*__lowercase , **__lowercase ) | 619 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : int =logging.get_logger(__name__)
_UpperCAmelCase : Dict ={"""vocab_file""": """spiece.model"""}
_UpperCAmelCase : Union[str, Any] ={
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
_UpperCAmelCase : Any ={
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
_UpperCAmelCase : str ="""▁"""
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __lowercase , __lowercase=True , __lowercase=True , __lowercase=False , __lowercase="[CLS]" , __lowercase="[SEP]" , __lowercase="<unk>" , __lowercase="[SEP]" , __lowercase="<pad>" , __lowercase="[CLS]" , __lowercase="[MASK]" , __lowercase = None , **__lowercase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
lowerCAmelCase_ : Tuple = (
AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase , normalized=__lowercase )
if isinstance(__lowercase , __lowercase )
else mask_token
)
lowerCAmelCase_ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , )
lowerCAmelCase_ : Tuple = do_lower_case
lowerCAmelCase_ : List[Any] = remove_space
lowerCAmelCase_ : Optional[int] = keep_accents
lowerCAmelCase_ : Any = vocab_file
lowerCAmelCase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowercase )
@property
def lowercase_ ( self ) -> List[str]:
return len(self.sp_model )
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : List[Any] = {self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Any:
lowerCAmelCase_ : Any = self.__dict__.copy()
lowerCAmelCase_ : str = None
return state
def __setstate__( self , __lowercase ) -> Optional[int]:
lowerCAmelCase_ : List[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase_ : int = {}
lowerCAmelCase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self , __lowercase ) -> int:
if self.remove_space:
lowerCAmelCase_ : Union[str, Any] = ''' '''.join(inputs.strip().split() )
else:
lowerCAmelCase_ : Optional[Any] = inputs
lowerCAmelCase_ : int = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
lowerCAmelCase_ : int = unicodedata.normalize('''NFKD''' , __lowercase )
lowerCAmelCase_ : str = ''''''.join([c for c in outputs if not unicodedata.combining(__lowercase )] )
if self.do_lower_case:
lowerCAmelCase_ : int = outputs.lower()
return outputs
def lowercase_ ( self , __lowercase ) -> List[str]:
lowerCAmelCase_ : Optional[Any] = self.preprocess_text(__lowercase )
lowerCAmelCase_ : Optional[int] = self.sp_model.encode(__lowercase , out_type=__lowercase )
lowerCAmelCase_ : List[str] = []
for piece in pieces:
if len(__lowercase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
lowerCAmelCase_ : Any = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowercase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase_ : List[str] = cur_pieces[1:]
else:
lowerCAmelCase_ : str = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowercase )
else:
new_pieces.append(__lowercase )
return new_pieces
def lowercase_ ( self , __lowercase ) -> Optional[Any]:
return self.sp_model.PieceToId(__lowercase )
def lowercase_ ( self , __lowercase ) -> Optional[int]:
return self.sp_model.IdToPiece(__lowercase )
def lowercase_ ( self , __lowercase ) -> Dict:
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : Any = ''''''
lowerCAmelCase_ : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowercase ) + token
lowerCAmelCase_ : List[str] = True
lowerCAmelCase_ : List[Any] = []
else:
current_sub_tokens.append(__lowercase )
lowerCAmelCase_ : List[str] = False
out_string += self.sp_model.decode(__lowercase )
return out_string.strip()
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : str = [self.sep_token_id]
lowerCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowercase )) + [1] + ([0] * len(__lowercase )) + [1]
return [1] + ([0] * len(__lowercase )) + [1]
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : List[Any] = [self.sep_token_id]
lowerCAmelCase_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
if not os.path.isdir(__lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase_ : Dict = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowercase , '''wb''' ) as fi:
lowerCAmelCase_ : Tuple = self.sp_model.serialized_model_proto()
fi.write(__lowercase )
return (out_vocab_file,) | 717 |
from manim import *
class snake_case__( UpperCAmelCase__ ):
    """Manim scene animating how checkpoint weights move between CPU, GPU and disk.

    NOTE(review): this block has been machine-obfuscated. Every local
    assignment targets the single name `lowerCAmelCase_` while later lines
    reference names such as `mem`, `cpu`, `gpu`, `model`, `checkpoint`,
    `model_cpu_arr`, `ckpt_arr`, `disk_left_col_base` that are never bound,
    and `__lowercase` (used as a positional argument throughout) is not in
    scope. As written, the scene raises NameError at runtime — the original
    variable names must be restored before this can run; TODO confirm against
    the upstream animation source.
    """
    def lowercase_ ( self ) -> Tuple:
        # Base rectangles: presumably memory-cell glyphs reused via .copy() below.
        lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        # CPU panel, placed on the left of the frame.
        lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 )
        lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__lowercase )
        # GPU panel (4 memory cells).
        lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(__lowercase )
        # Model panel (6 memory cells) on the right.
        lowerCAmelCase_ : str = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 )
        lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(__lowercase )
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : Dict = []
        # Lay small fill rectangles over the model cells, anchored to CPU cells.
        for i, rect in enumerate(__lowercase ):
            rect.set_stroke(__lowercase )
            lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
            self.add(__lowercase )
            model_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase , *__lowercase )
        # Loaded-checkpoint panel above the model.
        lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__lowercase )
        lowerCAmelCase_ : Optional[Any] = []
        lowerCAmelCase_ : Dict = []
        # Mirror each checkpoint cell onto a CPU column slot.
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 )
            target.move_to(__lowercase )
            ckpt_arr.append(__lowercase )
            lowerCAmelCase_ : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase )
        # Legend box with colored bullet markers.
        lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase_ : str = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__lowercase , __lowercase )
        lowerCAmelCase_ : str = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
        blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__lowercase )
        # Step 1 caption, then the disk panel built from meta-memory cells.
        lowerCAmelCase_ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 )
        lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
        # Animate checkpoint cells shrinking onto the disk column.
        lowerCAmelCase_ : int = []
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : int = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
        self.play(*__lowercase )
        self.play(FadeOut(__lowercase ) )
        # Step 2 caption: checkpoint garbage-collected from memory.
        lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__lowercase , run_time=3 ) )
        self.play(
            FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
        self.wait()
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case__( UpperCAmelCase__ ):
    """Processor that bundles a CLIP image processor and a CLIP tokenizer.

    Fixes from the obfuscated original:
    - the three distinct ProcessorMixin class attributes (`attributes`,
      `image_processor_class`, `tokenizer_class`) had been collapsed onto a
      single name so only the last survived;
    - `__init__` and `__call__` declared the same parameter name repeatedly
      (a SyntaxError);
    - `__call__` dropped the `encoding["pixel_values"]` assignment, so the
      combined text+image branch returned text features only.
    """

    # ProcessorMixin contract: names of the wrapped sub-processors and the
    # classes used to instantiate them.
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""CLIPTokenizer""", """CLIPTokenizerFast""")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )

        # Legacy `feature_extractor` kwarg is honoured only when no explicit
        # image_processor is given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ) -> BatchEncoding:
        """Tokenize `text` and/or preprocess `images`; at least one is required.

        Returns a BatchEncoding holding `input_ids`/... and/or `pixel_values`.
        """
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            # Merge the image features into the text encoding.
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    # NOTE(review): the obfuscation collapsed several distinct method/property
    # names onto `lowercase_` below (presumably batch_decode, decode,
    # model_input_names, feature_extractor_class, feature_extractor), so later
    # definitions shadow earlier ones — confirm the original names upstream
    # before renaming.
    def lowercase_ ( self , *args , **kwargs ):
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args , **kwargs )

    def lowercase_ ( self , *args , **kwargs ):
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def lowercase_ ( self ):
        # Union of tokenizer and image-processor input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def lowercase_ ( self ):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class

    @property
    def lowercase_ ( self ):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
# Roman numeral symbols in descending value order, including the subtractive
# pairs (CM, CD, XC, XL, IX, IV) so greedy conversion produces canonical
# numerals. NOTE(review): the obfuscation renamed this constant to
# `_UpperCAmelCase`, but the conversion function below iterates over `ROMAN`
# — the original name must be restored for that code to run.
_UpperCAmelCase : Dict =[
    (1000, """M"""),
    (900, """CM"""),
    (500, """D"""),
    (400, """CD"""),
    (100, """C"""),
    (90, """XC"""),
    (50, """L"""),
    (40, """XL"""),
    (10, """X"""),
    (9, """IX"""),
    (5, """V"""),
    (4, """IV"""),
    (1, """I"""),
]
def lowerCAmelCase ( roman )-> int:
    """Convert a Roman numeral string to its integer value.

    Fixes the obfuscated original, whose local assignments all targeted
    `lowerCAmelCase_` while the body read `vals`, `total`, `place` and the
    parameter `roman` — none of which were bound (NameError).
    """
    vals = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000}
    total = 0
    place = 0
    while place < len(roman ):
        # A smaller symbol before a larger one is a subtractive pair (IV, IX, XL, ...).
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def lowerCAmelCase ( number )-> str:
    """Convert a positive integer to its Roman numeral representation.

    Greedy conversion over the module-level ROMAN table. Fixes the obfuscated
    original: the annotated tuple-unpacking target `((x) , (y)) : T = ...` is
    a SyntaxError, and the locals/parameter were collapsed so `result` and
    `number` were never bound.
    """
    result = []
    for arabic, roman in ROMAN:
        # factor = how many times this symbol fits; number keeps the remainder.
        factor, number = divmod(number , arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger and the map of pretrained checkpoint configs.
# NOTE(review): both module-level names were collapsed onto `_UpperCAmelCase`
# by the obfuscation (presumably `logger` and
# `TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP`), so the second
# assignment clobbers the first — confirm and restore before use.
_UpperCAmelCase : int =logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] ={
    """microsoft/table-transformer-detection""": (
        """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
    ),
}
class snake_case__( UpperCAmelCase__ ):
    """Configuration class for a Table Transformer (DETR-style) model.

    Fixes from the obfuscated original:
    - `__init__` declared `__lowercase` for every parameter (a SyntaxError);
      the real names are recovered from the body's right-hand sides, whose
      defaults match positionally;
    - every attribute assignment targeted a throwaway local instead of
      `self.<name>`, so no configuration was ever stored on the instance;
    - the three PretrainedConfig class attributes (`model_type`,
      `keys_to_ignore_at_inference`, `attribute_map`) had been collapsed onto
      one name so only the last survived.
    """

    model_type = """table-transformer"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__(
        self ,
        use_timm_backbone=True ,
        backbone_config=None ,
        num_channels=3 ,
        num_queries=1_0_0 ,
        encoder_layers=6 ,
        encoder_ffn_dim=2_0_4_8 ,
        encoder_attention_heads=8 ,
        decoder_layers=6 ,
        decoder_ffn_dim=2_0_4_8 ,
        decoder_attention_heads=8 ,
        encoder_layerdrop=0.0 ,
        decoder_layerdrop=0.0 ,
        is_encoder_decoder=True ,
        activation_function="relu" ,
        d_model=2_5_6 ,
        dropout=0.1 ,
        attention_dropout=0.0 ,
        activation_dropout=0.0 ,
        init_std=0.02 ,
        init_xavier_std=1.0 ,
        auxiliary_loss=False ,
        position_embedding_type="sine" ,
        backbone="resnet50" ,
        use_pretrained_backbone=True ,
        dilation=False ,
        class_cost=1 ,
        bbox_cost=5 ,
        giou_cost=2 ,
        mask_loss_coefficient=1 ,
        dice_loss_coefficient=1 ,
        bbox_loss_coefficient=5 ,
        giou_loss_coefficient=2 ,
        eos_coefficient=0.1 ,
        **kwargs ,
    ) -> None:
        # A timm backbone and an explicit backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(backbone_config , dict ):
                # Re-hydrate a plain dict into the matching config class.
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    # NOTE(review): both properties below are named `lowercase_` (the second
    # shadows the first); the originals were presumably `num_attention_heads`
    # and `hidden_size` — confirm upstream before renaming.
    @property
    def lowercase_ ( self ) -> int:
        return self.encoder_attention_heads

    @property
    def lowercase_ ( self ) -> int:
        return self.d_model
class snake_case__( UpperCAmelCase__ ):
    """ONNX export configuration for the table-transformer model: declares the
    dynamic axes of the inputs, the validation tolerance, and a fixed value.
    """

    # Minimum toolchain version this export configuration targets.
    SCREAMING_SNAKE_CASE__ : List[Any] = version.parse("""1.11""" )

    # NOTE(review): the three properties below all share the name `lowercase_`
    # after obfuscation, so only the last definition survives on the class.
    @property
    def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
        # Map each model input to its dynamic (symbolic) axes.
        dynamic_axes = OrderedDict()
        dynamic_axes['''pixel_values'''] = {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}
        dynamic_axes['''pixel_mask'''] = {0: '''batch'''}
        return dynamic_axes

    @property
    def lowercase_ ( self ) -> float:
        # Absolute tolerance when validating exported outputs.
        return 0.00001

    @property
    def lowercase_ ( self ) -> int:
        return 12
import csv
import tweepy
# Twitter API credentials
# NOTE(review): all four credential constants were collapsed onto the single
# name `_UpperCAmelCase` by the obfuscation (presumably consumer key/secret
# and access key/secret), so only the last assignment survives — restore the
# original distinct names before wiring them into the OAuth handler below.
_UpperCAmelCase : int =""""""
_UpperCAmelCase : Optional[int] =""""""
_UpperCAmelCase : Dict =""""""
_UpperCAmelCase : str =""""""
def lowerCAmelCase ( lowerCAmelCase_ )-> None:
    """Download a user's recent tweets via the Twitter API and dump them to CSV.

    NOTE(review): obfuscation collapsed every local (`auth`, `api`,
    `alltweets`, `new_tweets`, `oldest`, `outtweets`, `writer`) onto
    `lowerCAmelCase_`, while the body still reads those original names — as
    written this raises NameError on the first `auth.set_access_token` call.
    The credentials passed to OAuthHandler also point at the parameter rather
    than the module constants. Restore the original bindings before running.
    """
    # authorize twitter, initialize tweepy
    lowerCAmelCase_ : Optional[int] = tweepy.OAuthHandler(lowerCAmelCase_ , lowerCAmelCase_ )
    auth.set_access_token(lowerCAmelCase_ , lowerCAmelCase_ )
    lowerCAmelCase_ : Any = tweepy.API(lowerCAmelCase_ )
    # initialize a list to hold all the tweepy Tweets
    lowerCAmelCase_ : Dict = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    lowerCAmelCase_ : Optional[int] = api.user_timeline(screen_name=lowerCAmelCase_ , count=200 )
    # save most recent tweets
    alltweets.extend(lowerCAmelCase_ )
    # save the id of the oldest tweet less one
    lowerCAmelCase_ : str = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(lowerCAmelCase_ ) > 0:
        print(f"""getting tweets before {oldest}""" )
        # all subsequent requests use the max_id param to prevent duplicates
        lowerCAmelCase_ : Optional[Any] = api.user_timeline(
            screen_name=lowerCAmelCase_ , count=200 , max_id=lowerCAmelCase_ )
        # save most recent tweets
        alltweets.extend(lowerCAmelCase_ )
        # update the id of the oldest tweet less one
        lowerCAmelCase_ : Optional[Any] = alltweets[-1].id - 1
        print(f"""...{len(lowerCAmelCase_ )} tweets downloaded so far""" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    lowerCAmelCase_ : Union[str, Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"""new_{screen_name}_tweets.csv""" , '''w''' ) as f:
        lowerCAmelCase_ : Optional[int] = csv.writer(lowerCAmelCase_ )
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(lowerCAmelCase_ )
# Script entry point.
# NOTE(review): `get_all_tweets` is the function's original name; the
# obfuscation renamed the definition above to `lowerCAmelCase`, so this call
# raises NameError until the definition's name is restored.
if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("""FirePing32""")
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_UpperCAmelCase : int =logging.get_logger(__name__)
def lowerCAmelCase ( box , width , height )-> list:
    """Scale an absolute pixel box (x0, y0, x1, y1) into the 0-1000 coordinate
    space LayoutLM-style models expect, given the image's width and height.

    Fixes the obfuscated signature, which declared the same parameter name
    three times (a SyntaxError); the body already read `box`/`width`/`height`.
    """
    return [
        int(1_000 * (box[0] / width) ),
        int(1_000 * (box[1] / height) ),
        int(1_000 * (box[2] / width) ),
        int(1_000 * (box[3] / height) ),
    ]
def lowerCAmelCase ( image , lang , tesseract_config )-> tuple:
    """Run Tesseract OCR on an image and return (words, normalized_boxes).

    Boxes are converted from (left, top, width, height) to
    (left, top, right, bottom) and scaled into the 0-1000 space.

    Fixes the obfuscated original: the signature repeated one parameter name
    (a SyntaxError) and all locals were collapsed onto `lowerCAmelCase_`.
    """
    # apply OCR
    pil_image = to_pil_image(image )
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    # NOTE(review): `normalize_box` is the box helper defined above, which the
    # obfuscation renamed to `lowerCAmelCase` — restore its name so this resolves.
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class snake_case__( UpperCAmelCase__ ):
    """Image processor for LayoutLM-style document models: resizes, rescales
    and normalizes document images and (optionally) runs Tesseract OCR to
    extract words with 0-1000-normalized bounding boxes.

    Fixes from the obfuscated original: every method signature repeated the
    same parameter name (a SyntaxError), attribute assignments never reached
    `self`, and the methods had been renamed to a single colliding name.
    Method names `resize`/`rescale`/`normalize` are recovered from the
    internal `self.resize(...)` etc. call sites in `preprocess`.
    """

    # BaseImageProcessor contract: names of the tensors this processor emits.
    model_input_names = ["""pixel_values"""]

    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_rescale = True , rescale_value = 1 / 2_5_5 , do_normalize = True , image_mean = None , image_std = None , apply_ocr = True , ocr_lang = None , tesseract_config = "" , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ) -> np.ndarray:
        """Resize `image` to the (height, width) given by `size`."""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        output_size = (size['''height'''], size['''width'''])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        """Normalize `image` channel-wise with `mean` and `std`."""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess( self , images , do_resize = None , size = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , apply_ocr = None , ocr_lang = None , tesseract_config = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        """Preprocess one or more images; every flag falls back to the value
        configured in __init__. Returns a BatchFeature with `pixel_values`
        plus `words`/`boxes` when OCR is applied.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self , '''pytesseract''' )
            # NOTE(review): `apply_tesseract` is the OCR helper defined above,
            # renamed to `lowerCAmelCase` by the obfuscation — restore its name.
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = BatchFeature(data={'''pixel_values''': images} , tensor_type=return_tensors )
        if apply_ocr:
            data['''words'''] = words_batch
            data['''boxes'''] = boxes_batch
        return data
from math import sqrt
def lowerCAmelCase ( number )-> bool:
    """Return True iff `number` is prime (trial division up to sqrt(number)).

    Fixes the obfuscated original, whose parameter and local `status` were
    collapsed onto other names so the body raised NameError.
    """
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(number ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool ), "'status' must been from type bool"
    return status
def lowerCAmelCase ( n )-> list:
    """Sieve of Eratosthenes: return all primes from 2 up to and including n.

    Fixes the obfuscated original: the parameter and the `begin_list`
    binding (and the zeroing of composites) were collapsed, so the body
    raised NameError.
    """
    assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2 , n + 1 ) )
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list ) ):
        for j in range(i + 1 , len(begin_list ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def lowerCAmelCase ( n )-> list:
    """Return all primes in [2, n] by primality-testing each candidate.

    Relies on the primality helper originally named `is_prime` (renamed by
    the obfuscation). Fixes the collapsed parameter/local bindings.
    """
    assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2 , n + 1 ):
        if is_prime(number ):
            ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def lowerCAmelCase ( number )-> list:
    """Return the prime factorization of `number` as a list of factors.

    0 and 1 (and primes) are returned as a single-element list. Note the
    original uses `/=`, so the quotient becomes a float mid-loop; preserved
    for behavioral compatibility. Fixes the collapsed local bindings
    (`ans`, `factor`, `quotient`).
    """
    assert isinstance(number , int ) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number ):
        while quotient != 1:
            if is_prime(factor ) and (quotient % factor == 0):
                ans.append(factor )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def lowerCAmelCase ( number )-> int:
    """Return the greatest prime factor of `number`.

    Delegates to the factorization helper (originally `prime_factorization`,
    renamed by the obfuscation). Fixes the collapsed local bindings.
    """
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = max(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def lowerCAmelCase ( number )-> int:
    """Return the smallest prime factor of `number`.

    Mirror of the greatest-prime-factor helper above, using min(). Fixes the
    collapsed local bindings.
    """
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = min(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def lowerCAmelCase ( number )-> bool:
    """Return True iff `number` is even. Fixes the collapsed parameter name."""
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , bool ), "compare bust been from type bool"
    return number % 2 == 0
def lowerCAmelCase ( number )-> bool:
    """Return True iff `number` is odd. Fixes the collapsed parameter name."""
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , bool ), "compare bust been from type bool"
    return number % 2 != 0
def lowerCAmelCase ( number )-> list:
    """Goldbach: return two primes whose sum equals the even `number` > 2.

    Relies on the helpers originally named `is_even`, `get_prime_numbers` and
    `is_prime` (renamed by the obfuscation). Fixes the collapsed local
    bindings (`ans`, `prime_numbers`, `len_pn`, `i`, `j`, `loop`).
    """
    assert (
        isinstance(number , int ) and (number > 2) and is_even(number )
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number )
    len_pn = len(prime_numbers )
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans , list )
        and (len(ans ) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0] )
        and is_prime(ans[1] )
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def lowerCAmelCase ( numbera , numberb )-> int:
    """Greatest common divisor of two non-negative integers (Euclid's algorithm).

    Fixes the obfuscated original, which declared the same parameter name
    twice (a SyntaxError) and collapsed the two operands so the loop computed
    `numbera % numbera`.
    """
    assert (
        isinstance(numbera , int )
        and isinstance(numberb , int )
        and (numbera >= 0)
        and (numberb >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while numberb != 0:
        rest = numbera % numberb
        numbera = numberb
        numberb = rest
    # precondition
    assert isinstance(numbera , int ) and (
        numbera >= 0
    ), "'number' must been from type int and positive"
    return numbera
def lowerCAmelCase ( numbera , numberb )-> int:
    """Least common multiple of two positive integers via prime factorizations.

    Relies on the factorization helper originally named `prime_factorization`
    (renamed by the obfuscation). Fixes the duplicated parameter names
    (a SyntaxError) and the collapsed locals (`ans`, the two factor lists,
    the per-factor counts and the `done` list).
    """
    assert (
        isinstance(numbera , int )
        and isinstance(numberb , int )
        and (numbera >= 1)
        and (numberb >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if numbera > 1 and numberb > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(numbera )
        prime_fac_b = prime_factorization(numberb )
    elif numbera == 1 or numberb == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(numbera , numberb )
    count_a = 0
    count_b = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                # shared factor: take it to the larger multiplicity.
                count_a = prime_fac_a.count(n )
                count_b = prime_fac_b.count(n )
                for _ in range(max(count_a , count_b ) ):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n )
                for _ in range(count_a ):
                    ans *= n
            done.append(n )
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n )
            for _ in range(count_b ):
                ans *= n
            done.append(n )
    # precondition
    assert isinstance(ans , int ) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def lowerCAmelCase ( n )-> int:
    """Return the n-th prime (0-indexed: n=0 -> 2).

    Relies on the primality helper originally named `is_prime` (renamed by
    the obfuscation). Fixes the collapsed parameter/local bindings.
    """
    assert isinstance(n , int ) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans ):
            ans += 1
    # precondition
    assert isinstance(ans , int ) and is_prime(
        ans ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return every prime strictly between two primes ``p_number_1 < p_number_2``.

    Uses the sibling helper ``is_prime``. The previous version declared two
    parameters with the same name (a SyntaxError) and compared a value with
    itself in the precondition; the distinct roles were restored from the
    assert message and from how each occurrence is used in the body.
    """
    # precondition
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    # NOTE(review): ans[0] raises IndexError when the two primes are adjacent
    # (no prime in between) — inherited behavior, kept as-is.
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list:
    """Return all positive divisors of ``n`` (including 1 and ``n``), ascending.

    Name restored from the call site in the perfect-number check below and the
    assert message 'getDivisiors'; the previous version referenced an undefined
    ``n`` and appended the wrong variable.
    """
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number(number: int) -> bool:
    """Return True when ``number`` equals the sum of its proper divisors.

    Delegates to ``get_divisors``; the previous version referenced an undefined
    ``number`` and called ``isinstance`` with a non-type second argument.
    """
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Return ``(numerator, denominator)`` reduced by their greatest common divisor.

    Uses the sibling ``gcd`` helper defined elsewhere in this file. The sign is
    preserved on whichever component carries it, since gcd is taken of the
    absolute values. The previous version referenced undefined parameter names.
    """
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    """Return n! for a non-negative integer ``n`` (0! == 1).

    The previous version referenced an undefined ``n`` and called
    ``isinstance`` with a non-type second argument.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n: int) -> int:
    """Return the Fibonacci number at index ``n`` with fib(0) == fib(1) == 1.

    Sequence produced: 1, 1, 2, 3, 5, 8, ... The previous version collapsed
    the three state variables into a single name, leaving ``tmp``, ``fib1``
    and ``ans`` undefined; they were restored from the initial values 0, 1, 1
    and the update order in the loop body.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
# Module-level flag; nothing in this chunk reads it. Presumably a determinism
# or TF32 switch for the training tests below — TODO(review): confirm intent.
_UpperCAmelCase : bool =False
class snake_case__( unittest.TestCase ):
    """Checks that one training step under a DDPM scheduler and one under a
    DDIM scheduler produce identical noisy images and predictions when all
    seeds are fixed."""

    def get_model_optimizer(self, resolution=32):
        """Build a small seeded UNet and an SGD optimizer over its parameters.

        Name restored from the call sites ``self.get_model_optimizer(...)``
        below, which also unpack the (model, optimizer) pair this returns.
        """
        set_seed(0)
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable

        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # the final loop iteration's tensors are compared between the two runs
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
# Named `precision` because the search functions below read it under that name.
precision = 10
def lin_search(left: int, right: int, array: list, target: int) -> int:
    """Sequentially scan ``array[left:right]`` for ``target``.

    Returns the index of the first match, or -1 when absent. Name restored
    from the call sites inside the two ternary-search functions below.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list, target: int) -> int:
    """Iterative ternary search over a sorted ``array``; returns an index of
    ``target`` or -1.

    Falls back to ``lin_search`` once the window is narrower than the
    module-level ``precision``. The previous version assigned the bounds to a
    single throwaway name, leaving ``left``/``right`` undefined.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        # while-else: reached only when the window closes without a hit
        return -1
def rec_ternary_search(left: int, right: int, array: list, target: int) -> int:
    """Recursive ternary search over ``array[left..right]``; returns an index
    of ``target`` or -1.

    Mirrors ``ite_ternary_search``: once the window is narrower than the
    module-level ``precision``, a linear scan finishes the job.
    """
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Read a sorted comma-separated list and a target value from stdin, then
    # run both search variants. The previous version stored both results in
    # the same renamed variable and printed it twice, so the recursive result
    # was never shown.
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at positions: {result_ite}")
        print(f"Recursive search: {target} found at positions: {result_rec}")
    else:
        print("Not found")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import table: the dict must be named `_import_structure`, because the
# `_LazyModule(...)` call at the bottom of this file refers to it by that name.
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is present.
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy submodules
    # are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table: the dict must be named `_import_structure`, because the
# `_LazyModule(...)` call at the bottom of this file refers to it by that name.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy submodules
    # are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class snake_case__( unittest.TestCase ):
    """Round-trip and consistency tests for `InstructBlipProcessor`.

    Helper-method names were restored from their call sites inside this class
    (`self.get_tokenizer()`, `self.get_image_processor()`,
    `self.get_qformer_tokenizer()`, `self.prepare_image_inputs()`); test-method
    names follow unittest conventions. The previous version never assigned
    `self.tmpdirname`, so every helper raised AttributeError.
    """

    def setUp(self):
        # A processor saved here is reloaded via AutoProcessor by the helpers below.
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        # positional order follows the order the components were built in;
        # TODO(review): confirm against InstructBlipProcessor's signature.
        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels-last uint8)."""
        # np.uint8 restored — the previous `np.uinta` is not a numpy attribute.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        # type expectations reconstructed from this file's imports —
        # TODO(review): confirm the exact classes used originally.
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
# Module-level flag; no consumer is visible in this chunk. Presumably a
# determinism/TF32 switch for the pipeline tests below — TODO(review): confirm.
_UpperCAmelCase : bool =False
class snake_case__( unittest.TestCase ):
    """Fast CPU tests for `VQDiffusionPipeline` built from tiny dummy components.

    Property and method names were restored from their call sites inside this
    class (`self.num_embed`, `self.dummy_vqvae`, ...); the previous version
    defined them all under a single name, so only the last survived.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = TransformeraDModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
        # NOTE(review): the loose 2.0 tolerance on the first check is kept
        # from the original source — confirm whether 1e-2 was intended.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class snake_case__( unittest.TestCase ):
    """GPU integration test for the pretrained ithq VQ-Diffusion pipeline.

    NOTE(review): this class reuses the name `snake_case__` from the fast-test
    class above, shadowing it at module scope — the two classes presumably had
    distinct names originally.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name):
    """Download up to ~3200 of a user's most recent tweets and write them to
    ``new_<screen_name>_tweets.csv``.

    Name restored from the ``__main__`` call below; local names were restored
    from their use sites (``alltweets.extend``, the ``{oldest}`` f-string, and
    the ``screen_name=`` keyword arguments).
    """
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # Slow-tokenizer class is unavailable without sentencepiece; the class
    # attribute below must still resolve, so bind the name to None.
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

# Constant names restored: the tokenizer class below references them, and the
# previous version had renamed them all to a single throwaway identifier.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class snake_case__(PreTrainedTokenizerFast):
    """Fast XLNet tokenizer backed by HuggingFace *tokenizers*.

    NOTE(review): the base class, the class attributes, the ``__init__``
    parameter names (previously all duplicated, a SyntaxError) and the three
    method names (previously all identical, so two were shadowed) were
    restored to the standard PreTrainedTokenizerFast API that the bodies
    clearly implement.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """XLNet sequence format: ``X <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for sequence A (+sep), 1 for sequence B (+sep), 2 for cls."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model into ``save_directory``; return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
# Variable names restored from their use sites (`parser.parse_args()`,
# `args.tokenizer_name`, `len(tokenizer)`, `**config_kwargs`,
# `from_config(config)`, `model.save_pretrained(...)`) — the previous version
# assigned every value to the same throwaway name, leaving them all undefined.
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
import math
import qiskit
def lowerCAmelCase ( input_a = 1 , input_b = 1 , carry_in = 1 )-> qiskit.result.counts.Counts:
    """Quantum full adder: add two bits plus a carry on a 4-qubit circuit.

    Args:
        input_a: first addend — 0, 1, or 2 (2 means a Hadamard superposition).
        input_b: second addend (same encoding).
        carry_in: carry-in bit (same encoding).

    Returns:
        qiskit.result.counts.Counts: measurement counts over (sum, carry-out).

    Raises:
        TypeError: if any input is a string.
        ValueError: if any input is negative, non-integral, or greater than 2.
    """
    # BUG FIX: the original raised TypeError whenever the inputs WERE the
    # accepted type (the isinstance test was collapsed/inverted); the intended
    # guard rejects string inputs.
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('''inputs must be integers.''' )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''' )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers
    qr = qiskit.QuantumRegister(4 , '''qr''' )
    cr = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qubits
    backend = qiskit.Aer.get_backend('''aer_simulator''' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
    # Demo: sum 1 + 1 with carry-in 1 and print the measurement counts.
    # NOTE(review): `quantum_full_adder` is not defined in this file as
    # written — the adder above is named `lowerCAmelCase`; confirm the
    # intended entry-point name.
    print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
_UpperCAmelCase : int =logging.get_logger(__name__)
# Map of pretrained BioGPT checkpoints to their hosted config files.
# NOTE(review): this binding reuses the name `_UpperCAmelCase`, clobbering
# the logger assigned just above — confirm the intended distinct names.
_UpperCAmelCase : List[Any] ={
    """microsoft/biogpt""": """https://huggingface.co/microsoft/biogpt/resolve/main/config.json""",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class snake_case__( UpperCAmelCase__ ):
    """Configuration class for BioGPT models.

    Stores the architecture hyper-parameters; defaults match the
    microsoft/biogpt checkpoint.
    """

    # model_type identifier used by the auto classes.
    SCREAMING_SNAKE_CASE__ : int = """biogpt"""

    def __init__( self , vocab_size=4_2_3_8_4 , hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , intermediate_size=4_0_9_6 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_0_2_4 , initializer_range=0.02 , layer_norm_eps=1e-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> str:
        """Store configuration attributes and forward token ids to the base class.

        NOTE(review): the original declared every parameter as one duplicated
        obfuscated name (a SyntaxError) and assigned values to throwaway
        locals instead of attributes; parameter names restored from the
        values the body was reading.
        """
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        # Base-class init is called last, matching the original ordering.
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
import re
def lowerCAmelCase ( phone )-> bool:
    """Validate an Indian mobile phone number.

    Accepts an optional "+91" country code (followed by an optional "-" or
    space), an optional leading "0" or "91", then 10 digits starting 7/8/9.

    Args:
        phone: phone-number string to validate.

    Returns:
        bool: True if `phone` is a valid Indian mobile number, else False.
    """
    pat = re.compile(r'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
    # BUG FIX: the original passed the compiled pattern as BOTH arguments of
    # re.search, so the phone string was never searched; search the phone
    # number against the pattern instead.
    if match := pat.search(phone ):
        # Pattern is fully anchored, so a match means the string is valid.
        return match.string == phone
    return False
if __name__ == "__main__":
    # Demo: validate a sample number with country code.
    # NOTE(review): `indian_phone_validator` is not defined in this file as
    # written — the validator above is named `lowerCAmelCase`; confirm the
    # intended entry-point name.
    print(indian_phone_validator("""+918827897895"""))
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
# Checkpoint identifiers for the four Stable Diffusion v1.x releases
# compared by the pipeline below.
# NOTE(review): all four bindings reuse the name `_UpperCAmelCase`, so each
# overwrites the previous one — confirm the intended distinct constant names.
_UpperCAmelCase : List[str] ="""CompVis/stable-diffusion-v1-1"""
_UpperCAmelCase : str ="""CompVis/stable-diffusion-v1-2"""
_UpperCAmelCase : Union[str, Any] ="""CompVis/stable-diffusion-v1-3"""
_UpperCAmelCase : List[str] ="""CompVis/stable-diffusion-v1-4"""
class snake_case__( UpperCAmelCase__ ):
    """Comparison pipeline: runs one prompt through Stable Diffusion
    v1.1-v1.4 and returns the four resulting images side by side.

    NOTE(review): obfuscation collapsed the distinct pipe1..pipe4 attributes
    into `pipea`, the four text2img wrapper names into `lowercase_` (so later
    defs shadow earlier ones), and most parameter lists into duplicated
    `__lowercase` names (SyntaxErrors as written) — restore from the upstream
    community pipeline before use.
    """
    def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = True , ) -> Tuple:
        # NOTE(review): `super()._init_()` is not a real dunder and raises
        # AttributeError at runtime — almost certainly meant `__init__`;
        # confirm against the upstream source.
        super()._init_()
        # Load the three earlier checkpoints; the fourth is assembled from
        # the components passed to this constructor.
        lowerCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(__lowercase )
        lowerCAmelCase_ : Optional[Any] = StableDiffusionPipeline.from_pretrained(__lowercase )
        lowerCAmelCase_ : List[str] = StableDiffusionPipeline.from_pretrained(__lowercase )
        lowerCAmelCase_ : Dict = StableDiffusionPipeline(
            vae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , unet=__lowercase , scheduler=__lowercase , safety_checker=__lowercase , feature_extractor=__lowercase , requires_safety_checker=__lowercase , )
        self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
    @property
    def lowercase_ ( self ) -> Dict[str, Any]:
        # Exposes all configured sub-components except private ("_"-prefixed)
        # config entries.
        # NOTE(review): `getattr(self, __lowercase)` reads a name that does not
        # exist in this scope; the comprehension variable `k` looks intended.
        return {k: getattr(self , __lowercase ) for k in self.config.keys() if not k.startswith('''_''' )}
    def lowercase_ ( self , __lowercase = "auto" ) -> Any:
        # Enable sliced attention computation to trade speed for memory.
        # NOTE(review): body reads `slice_size`, which the obfuscated
        # parameter no longer provides.
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            lowerCAmelCase_ : List[str] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(__lowercase )
    def lowercase_ ( self ) -> int:
        # Disable attention slicing (compute attention in a single step).
        self.enable_attention_slicing(__lowercase )
    @torch.no_grad()
    def lowercase_ ( self , __lowercase , __lowercase = 5_1_2 , __lowercase = 5_1_2 , __lowercase = 5_0 , __lowercase = 7.5 , __lowercase = None , __lowercase = 1 , __lowercase = 0.0 , __lowercase = None , __lowercase = None , __lowercase = "pil" , __lowercase = True , __lowercase = None , __lowercase = 1 , **__lowercase , ) -> Optional[Any]:
        # Wrapper: forward all generation kwargs to the v1.1 sub-pipeline.
        return self.pipea(
            prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
    @torch.no_grad()
    def lowercase_ ( self , __lowercase , __lowercase = 5_1_2 , __lowercase = 5_1_2 , __lowercase = 5_0 , __lowercase = 7.5 , __lowercase = None , __lowercase = 1 , __lowercase = 0.0 , __lowercase = None , __lowercase = None , __lowercase = "pil" , __lowercase = True , __lowercase = None , __lowercase = 1 , **__lowercase , ) -> Dict:
        # Wrapper: forward all generation kwargs to the v1.2 sub-pipeline.
        return self.pipea(
            prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
    @torch.no_grad()
    def lowercase_ ( self , __lowercase , __lowercase = 5_1_2 , __lowercase = 5_1_2 , __lowercase = 5_0 , __lowercase = 7.5 , __lowercase = None , __lowercase = 1 , __lowercase = 0.0 , __lowercase = None , __lowercase = None , __lowercase = "pil" , __lowercase = True , __lowercase = None , __lowercase = 1 , **__lowercase , ) -> Tuple:
        # Wrapper: forward all generation kwargs to the v1.3 sub-pipeline.
        return self.pipea(
            prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
    @torch.no_grad()
    def lowercase_ ( self , __lowercase , __lowercase = 5_1_2 , __lowercase = 5_1_2 , __lowercase = 5_0 , __lowercase = 7.5 , __lowercase = None , __lowercase = 1 , __lowercase = 0.0 , __lowercase = None , __lowercase = None , __lowercase = "pil" , __lowercase = True , __lowercase = None , __lowercase = 1 , **__lowercase , ) -> Any:
        # Wrapper: forward all generation kwargs to the v1.4 sub-pipeline.
        return self.pipea(
            prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
    @torch.no_grad()
    def lowercase_ ( self , __lowercase , __lowercase = 5_1_2 , __lowercase = 5_1_2 , __lowercase = 5_0 , __lowercase = 7.5 , __lowercase = None , __lowercase = 1 , __lowercase = 0.0 , __lowercase = None , __lowercase = None , __lowercase = "pil" , __lowercase = True , __lowercase = None , __lowercase = 1 , **__lowercase , ) -> List[Any]:
        # Run all four checkpoints on the same prompt and collect the results.
        lowerCAmelCase_ : Union[str, Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
        self.to(__lowercase )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
        # Get first result from Stable Diffusion Checkpoint v1.1
        lowerCAmelCase_ : List[str] = self.textaimg_sda_a(
            prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        lowerCAmelCase_ : Optional[Any] = self.textaimg_sda_a(
            prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        lowerCAmelCase_ : Optional[int] = self.textaimg_sda_a(
            prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        lowerCAmelCase_ : Optional[Any] = self.textaimg_sda_a(
            prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCAmelCase : Any =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
    """Image processor for ConvNeXT-style models.

    Resizes images (with a crop-percentage shortcut for shortest edges
    below 384), rescales pixel values and normalizes them into
    model-ready "pixel_values".

    NOTE(review): parameter and local names were obfuscated into duplicated
    identifiers (SyntaxErrors as written); they are restored here from the
    attribute names and keyword arguments the bodies were reading. The
    internal helper methods are still named `lowercase_` from the
    obfuscation, so the `self.resize`/`self.rescale`/`self.normalize` calls
    in `preprocess` rely on the upstream method names — confirm before use.
    """

    # Keys produced by preprocess().
    SCREAMING_SNAKE_CASE__ : Dict = ["""pixel_values"""]

    def __init__( self , do_resize = True , size = None , crop_pct = None , resample = PILImageResampling.BILINEAR , do_rescale = True , rescale_factor = 1 / 2_5_5 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        """Store default preprocessing settings."""
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 3_8_4}
        size = get_size_dict(size , default_to_square=False )
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def lowercase_ ( self , image , size , crop_pct , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        """Resize `image`; below 384 resize to shortest_edge/crop_pct then center-crop."""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
        shortest_edge = size['''shortest_edge''']
        if shortest_edge < 3_8_4:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct )
            resize_size = get_resize_output_image_size(image , size=resize_shortest_edge , default_to_square=False )
            image = resize(image=image , size=resize_size , resample=resample , data_format=data_format , **kwargs )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image , size=(shortest_edge, shortest_edge) , data_format=data_format , **kwargs )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image , size=(shortest_edge, shortest_edge) , resample=resample , data_format=data_format , **kwargs )

    def lowercase_ ( self , image , scale , data_format = None , **kwargs , ) -> Any:
        """Rescale pixel values by `scale` (e.g. 1/255)."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def lowercase_ ( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def lowercase_ ( self , images , do_resize = None , size = None , crop_pct = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        """Preprocess a batch of images; None arguments fall back to instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # BUG FIX: `and` binds tighter than `or`, so the unparenthesized
        # condition raised whenever `resample` was None even when do_resize
        # was False; parenthesized to match the error message's intent.
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
            raise ValueError('''crop_pct must be specified if size < 384.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def lowerCAmelCase ( config , base_model=False )-> list:
    """Build (checkpoint_key, hf_key) pairs mapping MSN names to HF ViT names.

    Args:
        config: model config providing `num_hidden_layers`.
        base_model: when True, strip the leading "vit." prefix (bare backbone)
            and map the final layernorm instead of a classifier head.

    Returns:
        list: list of (source_key, target_key) tuples.
    """
    # NOTE(review): the original duplicated the parameter name (a SyntaxError)
    # and referenced undefined `rename_keys`/`config`/`base_model`; names
    # restored from the body's usage.
    rename_keys = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"""module.blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
        rename_keys.append((f"""module.blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
        rename_keys.append(
            (f"""module.blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
        rename_keys.append((f"""module.blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
        rename_keys.append((f"""module.blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
        rename_keys.append((f"""module.blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
        rename_keys.append((f"""module.blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
        rename_keys.append((f"""module.blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
        rename_keys.append((f"""module.blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
        rename_keys.append((f"""module.blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ('''module.cls_token''', '''vit.embeddings.cls_token'''),
            ('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
            ('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
            ('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
        ] )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('''module.norm.weight''', '''layernorm.weight'''),
                ('''module.norm.bias''', '''layernorm.bias'''),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('''norm.weight''', '''vit.layernorm.weight'''),
                ('''norm.bias''', '''vit.layernorm.bias'''),
                ('''head.weight''', '''classifier.weight'''),
                ('''head.bias''', '''classifier.bias'''),
            ] )
    return rename_keys
def lowerCAmelCase ( state_dict , config , base_model=False )-> None:
    """Split fused qkv projections into separate query/key/value tensors.

    The timm-style checkpoint stores attention as one fused qkv matrix +
    bias per layer; HF ViT expects separate query/key/value entries.
    Modifies `state_dict` in place.

    Args:
        state_dict: checkpoint state dict, modified in place.
        config: model config providing `num_hidden_layers` and `hidden_size`.
        base_model: when True, target keys carry no leading "vit." prefix.
    """
    # NOTE(review): the original assigned each slice to a throwaway local
    # instead of writing it back into `state_dict`; the target key names are
    # restored from the upstream ViT conversion scripts — confirm against
    # convert_vit_msn_to_pytorch.py.
    for i in range(config.num_hidden_layers ):
        prefix = "" if base_model else "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""module.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""module.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase ( state_dict )-> None:
    """Drop the classification-head weights from `state_dict` (in place).

    Args:
        state_dict: checkpoint state dict, modified in place.
    """
    # NOTE(review): the original iterated over an undefined `ignore_keys`
    # name; restored from the list assigned just above it.
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        # Default argument makes pop a no-op when the key is absent.
        state_dict.pop(k , None )
def lowerCAmelCase ( state_dict )-> None:
    """Drop the self-supervised projection head from `state_dict` (in place).

    The projection head is used only during MSN pre-training; downstream
    tasks do not need it.

    Args:
        state_dict: checkpoint state dict, modified in place.
    """
    ignore_keys = [
        '''module.fc.fc1.weight''',
        '''module.fc.fc1.bias''',
        '''module.fc.bn1.weight''',
        '''module.fc.bn1.bias''',
        '''module.fc.bn1.running_mean''',
        '''module.fc.bn1.running_var''',
        '''module.fc.bn1.num_batches_tracked''',
        '''module.fc.fc2.weight''',
        '''module.fc.fc2.bias''',
        '''module.fc.bn2.weight''',
        '''module.fc.bn2.bias''',
        '''module.fc.bn2.running_mean''',
        '''module.fc.bn2.running_var''',
        '''module.fc.bn2.num_batches_tracked''',
        '''module.fc.fc3.weight''',
        '''module.fc.fc3.bias''',
    ]
    for k in ignore_keys:
        # Default argument makes pop a no-op when the key is absent.
        state_dict.pop(k , None )
def lowerCAmelCase ( dct , old , new )-> None:
    """Move the value stored under key `old` to key `new` in `dct` (in place).

    Args:
        dct: dictionary to modify.
        old: existing key to remove.
        new: key under which to store the value.
    """
    # NOTE(review): the original popped the value into a throwaway local and
    # never wrote it back under the new key; restored the write.
    val = dct.pop(old )
    dct[new] = val
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]:
    """Convert a ViT-MSN checkpoint from a URL to HF format and save it.

    Downloads the checkpoint, remaps/splits its weights into a ViTMSNModel,
    verifies the first hidden-state values against precomputed references,
    then saves the model and image processor.

    NOTE(review): the two parameters share one obfuscated name (a SyntaxError
    as written); the body reads `checkpoint_url` and
    `pytorch_dump_folder_path`, and most locals (`config`, `state_dict`,
    `image_processor`, `inputs`, ...) are likewise collapsed into
    `lowerCAmelCase_` — restore from the upstream conversion script before
    running.
    """
    lowerCAmelCase_ : Any = ViTMSNConfig()
    lowerCAmelCase_ : Optional[int] = 1_000
    # Label mapping (imagenet-1k id -> label) fetched from the hub.
    lowerCAmelCase_ : Optional[int] = '''datasets/huggingface/label-files'''
    lowerCAmelCase_ : Optional[Any] = '''imagenet-1k-id2label.json'''
    lowerCAmelCase_ : List[Any] = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ ) , '''r''' ) )
    lowerCAmelCase_ : Optional[int] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
    lowerCAmelCase_ : Optional[int] = idalabel
    lowerCAmelCase_ : Dict = {v: k for k, v in idalabel.items()}
    # Architecture hyper-parameters inferred from the checkpoint URL.
    if "s16" in checkpoint_url:
        lowerCAmelCase_ : str = 384
        lowerCAmelCase_ : Optional[Any] = 1_536
        lowerCAmelCase_ : Dict = 6
    elif "l16" in checkpoint_url:
        lowerCAmelCase_ : Dict = 1_024
        lowerCAmelCase_ : List[Any] = 4_096
        lowerCAmelCase_ : Optional[Any] = 24
        lowerCAmelCase_ : Optional[int] = 16
        lowerCAmelCase_ : List[str] = 0.1
    elif "b4" in checkpoint_url:
        lowerCAmelCase_ : Dict = 4
    elif "l7" in checkpoint_url:
        lowerCAmelCase_ : Tuple = 7
        lowerCAmelCase_ : Any = 1_024
        lowerCAmelCase_ : Optional[int] = 4_096
        lowerCAmelCase_ : Optional[Any] = 24
        lowerCAmelCase_ : Optional[Any] = 16
        lowerCAmelCase_ : Optional[int] = 0.1
    lowerCAmelCase_ : List[str] = ViTMSNModel(lowerCAmelCase_ )
    # Only the target encoder of the MSN checkpoint is converted.
    lowerCAmelCase_ : List[str] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location='''cpu''' )['''target_encoder''']
    lowerCAmelCase_ : List[str] = ViTImageProcessor(size=config.image_size )
    remove_projection_head(lowerCAmelCase_ )
    lowerCAmelCase_ : Dict = create_rename_keys(lowerCAmelCase_ , base_model=lowerCAmelCase_ )
    for src, dest in rename_keys:
        rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ , base_model=lowerCAmelCase_ )
    model.load_state_dict(lowerCAmelCase_ )
    model.eval()
    # Run a sample image through the converted model to verify the weights.
    lowerCAmelCase_ : Dict = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    lowerCAmelCase_ : str = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
    lowerCAmelCase_ : Optional[Any] = ViTImageProcessor(
        size=config.image_size , image_mean=lowerCAmelCase_ , image_std=lowerCAmelCase_ )
    lowerCAmelCase_ : str = image_processor(images=lowerCAmelCase_ , return_tensors='''pt''' )
    # forward pass
    torch.manual_seed(2 )
    lowerCAmelCase_ : Optional[Any] = model(**lowerCAmelCase_ )
    lowerCAmelCase_ : List[Any] = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        lowerCAmelCase_ : Optional[int] = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
    elif "b16" in checkpoint_url:
        lowerCAmelCase_ : List[str] = torch.tensor([[14.2889, -18.9045, 11.7281]] )
    elif "l16" in checkpoint_url:
        lowerCAmelCase_ : str = torch.tensor([[41.5028, -22.8681, 45.6475]] )
    elif "b4" in checkpoint_url:
        lowerCAmelCase_ : str = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
    else:
        lowerCAmelCase_ : Union[str, Any] = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , lowerCAmelCase_ , atol=1e-4 )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(lowerCAmelCase_ )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
    # Command-line entry point: parse the checkpoint URL and output folder,
    # then run the conversion.
    # NOTE(review): the parser/args bindings were obfuscated to
    # `_UpperCAmelCase` while the statements below read `parser` and `args`,
    # and `convert_vit_msn_checkpoint` is not defined under that name in this
    # file as written — confirm the intended names.
    _UpperCAmelCase =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
        type=str,
        help="""URL of the checkpoint you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    _UpperCAmelCase =parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
_UpperCAmelCase : Optional[int] =logging.get_logger(__name__)
# Map of pretrained GPT-NeoX-Japanese checkpoints to their hosted configs.
# NOTE(review): this binding reuses the name `_UpperCAmelCase`, clobbering
# the logger assigned just above — confirm the intended distinct names.
_UpperCAmelCase : Union[str, Any] ={
    """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
    """Configuration class for GPT-NeoX-Japanese models.

    Stores the architecture hyper-parameters; defaults match the
    abeja/gpt-neox-japanese-2.7b checkpoint.
    """

    # model_type identifier used by the auto classes.
    SCREAMING_SNAKE_CASE__ : str = """gpt_neox_japanese"""

    def __init__( self , vocab_size=3_2_0_0_0 , hidden_size=2_5_6_0 , num_hidden_layers=3_2 , num_attention_heads=3_2 , intermediate_multiple_size=4 , hidden_act="gelu" , rotary_pct=1.00 , rotary_emb_base=1_0_0_0_0 , max_position_embeddings=2_0_4_8 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=3_1_9_9_6 , eos_token_id=3_1_9_9_9 , attention_dropout=0.1 , hidden_dropout=0.0 , **kwargs , ) -> str:
        """Store configuration attributes and forward token ids to the base class.

        NOTE(review): the original declared every parameter as one duplicated
        obfuscated name (a SyntaxError) and assigned values to throwaway
        locals instead of attributes; parameter names restored from the
        values the body was reading.
        """
        # Base-class init is called first, matching the original ordering.
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class snake_case__( unittest.TestCase ):
    """Nightly integration test for the ONNX Stable Diffusion legacy
    inpainting pipeline on GPU (downloads model and reference assets)."""
    @property
    def lowercase_ ( self ) -> Union[str, Any]:
        # ONNX Runtime execution-provider tuple: CUDA with a 15 GB arena cap.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def lowercase_ ( self ) -> List[Any]:
        # Session options for ONNX Runtime.
        # NOTE(review): the second assignment binds `False` to a throwaway
        # local instead of an option on `options` (obfuscation lost the
        # attribute; upstream disables `enable_mem_pattern`) — confirm.
        lowerCAmelCase_ : List[str] = ort.SessionOptions()
        lowerCAmelCase_ : int = False
        return options
    def lowercase_ ( self ) -> Dict:
        # End-to-end inpainting: load image + mask + reference output, run the
        # pipeline deterministically, and compare against the reference.
        lowerCAmelCase_ : Optional[Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        lowerCAmelCase_ : int = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        lowerCAmelCase_ : Optional[int] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )
        # using the PNDM scheduler by default
        lowerCAmelCase_ : Dict = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=__lowercase , feature_extractor=__lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowerCAmelCase_ : Optional[int] = '''A red cat sitting on a park bench'''
        # Fixed seed keeps the output comparable to the stored reference.
        lowerCAmelCase_ : Any = np.random.RandomState(0 )
        lowerCAmelCase_ : Optional[Any] = pipe(
            prompt=__lowercase , image=__lowercase , mask_image=__lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_5 , generator=__lowercase , output_type='''np''' , )
        lowerCAmelCase_ : Dict = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image ).max() < 1e-2
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    """Builds a tiny MaskFormer config plus random inputs and runs shape/consistency
    checks on the model's outputs.

    NOTE(review): the obfuscated original named this class ``snake_case__``, named
    every method ``lowercase_`` (so later defs shadowed earlier ones) and declared
    duplicated ``__lowercase`` parameters (a SyntaxError).  All names are restored
    from the call sites visible in this file (``MaskFormerModelTester(self)``,
    ``self.model_tester.prepare_config_and_inputs()``, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, pixel_mask, mask_labels, class_labels) on torch_device."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        # Random binary masks and multi-hot class labels per image.
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        """Tiny Swin backbone + tiny DETR decoder so the tests stay fast."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        # NOTE(review): assertTrue with two arguments only checks truthiness of the
        # first; kept as-is for parity with the upstream test.
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
        comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class snake_case__(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API tests for MaskFormer.

    NOTE(review): the obfuscated original inherited from the undefined name
    ``UpperCAmelCase__`` (restored to the imported ``ModelTesterMixin`` /
    ``PipelineTesterMixin``), bound every class attribute to
    ``SCREAMING_SNAKE_CASE__`` and every test to the name ``lowercase_`` (so only
    the last definition survived).  Attribute and test names are restored.
    """

    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    # ModelTesterMixin feature flags — all disabled for MaskFormer.
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config)
        model.to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
_UpperCAmelCase : Dict =1E-4  # absolute tolerance used by the integration-test tensor comparisons


def prepare_img():
    """Load the COCO cats fixture image used by the integration tests below.

    NOTE(review): the obfuscated original defined this function as
    ``lowerCAmelCase`` while every call site in this file uses ``prepare_img`` —
    the definition is renamed to match the callers.
    """
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_vision
@slow
class snake_case__(unittest.TestCase):
    """Slow integration tests that compare MaskFormer outputs on a real image
    against reference slices recorded from the original implementation.

    NOTE(review): method names and the identifiers inside them are restored from
    the call sites (``self.default_image_processor``, ``prepare_img()``); the
    obfuscated original referenced the undefined name ``__lowercase`` wherever
    ``torch_device`` / local variables / the 1e-4 tolerance were meant.
    """

    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=1e-4)
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=1e-4
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=1e-4
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=1e-4))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=1e-4))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=1e-4))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )
        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]
        with torch.no_grad():
            outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

# Module-level logger plus the mapping from slow-tokenizer class names to their
# fast counterparts.  NOTE(review): the obfuscated original bound both values to
# ``_UpperCAmelCase`` even though the code below reads ``logger`` and
# ``TOKENIZER_CLASSES`` — the names used by the readers are restored.
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Download slow-tokenizer checkpoints, convert them and save fast tokenizer files.

    Args:
        tokenizer_name: key into ``TOKENIZER_CLASSES`` (``None`` = convert all).
        checkpoint_name: single checkpoint to convert (``None`` = all canonical ones).
        dump_path: output directory for the generated ``tokenizer.json`` files.
        force_download: re-download checkpoints even if cached.

    Raises:
        ValueError: if ``tokenizer_name`` is given but unknown.

    NOTE(review): the obfuscated original declared all four parameters as
    ``lowerCAmelCase_`` while the body read the undefined names used here, and was
    defined as ``lowerCAmelCase`` although the ``__main__`` block calls
    ``convert_slow_checkpoint_to_fast`` — both are restored.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(f"""Loading tokenizer classes: {tokenizer_names}""")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""")
        for checkpoint in checkpoint_names:
            logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"""=> File names {file_names}""")
            # Keep only the generated tokenizer.json artifacts.
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"""=> removing {file_name}""")
if __name__ == "__main__":
    # NOTE(review): the obfuscated original assigned the parser and parsed args
    # to ``_UpperCAmelCase`` while the code below read the undefined names
    # ``parser`` and ``args`` — restored so the script actually runs.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
    )
    parser.add_argument(
        """--tokenizer_name""",
        default=None,
        type=str,
        help=(
            f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
            """download and convert all the checkpoints from AWS."""
        ),
    )
    parser.add_argument(
        """--checkpoint_name""",
        default=None,
        type=str,
        help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
    )
    parser.add_argument(
        """--force_download""",
        action="""store_true""",
        help="""Re-download checkpoints.""",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 708 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class snake_case__(TaskTemplate):
    """Task template for automatic speech recognition datasets: an ``audio`` input
    column mapped to a string ``transcription`` column.

    NOTE(review): the base class (the imported ``TaskTemplate``), ``frozen=True``,
    the field names and the method signatures are restored — the obfuscated
    original used the undefined names ``UpperCAmelCase__``/``__lowercase`` and
    bound every field to the same ``SCREAMING_SNAKE_CASE__`` name.
    """

    # `task` is serialized even when left at its default value.
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        """Return a copy of this template whose input schema uses the dataset's own audio feature."""
        if self.audio_column not in features:
            raise ValueError(f"""Column {self.audio_column} is not present in features.""")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"""Column {self.audio_column} is not an Audio type.""")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # The dataclass is frozen, so write through __dict__ rather than setattr.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class snake_case__(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Model tests for ``VQModel``.

    NOTE(review): the mixin bases (restored to the imported ``ModelTesterMixin``
    / ``UNetTesterMixin``), the ``model_class``/``main_input_name`` attributes
    and the method names are restored from the conventions the mixins rely on;
    the obfuscated original used undefined/duplicated names throughout.
    """

    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()
        # Seed all RNGs so the reference slice below is reproducible.
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
# Parameter-name sets for the shared diffusion-pipeline tests: each larger
# frozenset lists the call arguments a family of pipelines accepts, and the
# companion frozenset that follows lists the arguments exercised by the batched
# tests (text-to-image, image variation, inpainting, class-conditioned,
# unconditional, audio, spectrogram pipelines, in that order).
#
# NOTE(review): the obfuscator collapsed all of the original constant names
# (presumably TEXT_TO_IMAGE_PARAMS, IMAGE_VARIATION_PARAMS, ... — TODO confirm
# against the upstream ``pipeline_params`` module) onto a handful of
# ``_UpperCAmelCase`` bindings, so each assignment below overwrites a previous
# one and only the final binding of each name survives at import time.
_UpperCAmelCase : int =frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : List[Any] =frozenset(["""prompt""", """negative_prompt"""])
_UpperCAmelCase : Dict =frozenset([])
_UpperCAmelCase : int =frozenset(["""image"""])
_UpperCAmelCase : Tuple =frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : int =frozenset(["""image"""])
_UpperCAmelCase : str =frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : int =frozenset(["""prompt""", """image""", """negative_prompt"""])
_UpperCAmelCase : Optional[int] =frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : Optional[int] =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_UpperCAmelCase : Optional[Any] =frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Optional[Any] =frozenset(["""image""", """mask_image"""])
_UpperCAmelCase : Union[str, Any] =frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""example_image""", """image""", """mask_image"""])
_UpperCAmelCase : Any =frozenset(["""class_labels"""])
_UpperCAmelCase : List[Any] =frozenset(["""class_labels"""])
_UpperCAmelCase : int =frozenset(["""batch_size"""])
_UpperCAmelCase : str =frozenset([])
_UpperCAmelCase : str =frozenset(["""batch_size"""])
_UpperCAmelCase : Optional[Any] =frozenset([])
_UpperCAmelCase : Tuple =frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""prompt""", """negative_prompt"""])
_UpperCAmelCase : List[str] =frozenset(["""input_tokens"""])
_UpperCAmelCase : Optional[Any] =frozenset(["""input_tokens"""])
from manim import *
class snake_case__( UpperCAmelCase__ ):
    '''simple docstring'''

    # NOTE(review): the base class ``UpperCAmelCase__`` and the many
    # ``__lowercase`` arguments below are undefined in this file — they appear
    # to be obfuscated manim names (presumably the ``Scene`` base class and
    # direction constants such as RIGHT/DOWN/UP — TODO confirm against the
    # upstream animation script).  Code is left byte-identical; comments only.
    def lowercase_ ( self ) -> Tuple:
        """Build and animate the CPU/GPU/model/checkpoint/disk diagram;
        presumably manim's ``construct`` entry point — TODO confirm."""
        # Basic cells: a full-size memory cell, a small "meta" cell, and the
        # fill rectangle used to colour occupied cells.
        lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU block: two columns of six cells with a label.
        lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 )
        lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__lowercase )
        # GPU block: four cells with a label.
        lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(__lowercase )
        # Model block: six cells with a label.
        lowerCAmelCase_ : str = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 )
        lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(__lowercase )
        # Fill the model cells and position matching targets on the CPU columns.
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : Dict = []
        for i, rect in enumerate(__lowercase ):
            rect.set_stroke(__lowercase )
            lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
            self.add(__lowercase )
            model_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase , *__lowercase )
        # Loaded-checkpoint block.
        lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__lowercase )
        # Colour the checkpoint cells and mirror them onto the CPU columns.
        lowerCAmelCase_ : Optional[Any] = []
        lowerCAmelCase_ : Dict = []
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 )
            target.move_to(__lowercase )
            ckpt_arr.append(__lowercase )
            lowerCAmelCase_ : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase )
        # Legend box with key text.
        lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase_ : str = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__lowercase , __lowercase )
        lowerCAmelCase_ : str = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
        blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__lowercase )
        # Step 1 caption plus the disk block (two columns of small meta cells).
        lowerCAmelCase_ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 )
        lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
        # Animate the checkpoint cells shrinking onto the disk.
        lowerCAmelCase_ : int = []
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : int = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
        self.play(*__lowercase )
        self.play(FadeOut(__lowercase ) )
        # Step 2 caption, then fade everything out.
        lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__lowercase , run_time=3 ) )
        self.play(
            FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
        self.wait()
def lowerCAmelCase(lowerCAmelCase_=1_000_000) -> int:
    """Project Euler 14: return the start value below ``lowerCAmelCase_`` that
    produces the longest Collatz chain.

    Chain lengths count the start value itself; already-computed lengths are
    memoized so shared chain suffixes are not re-walked.
    """
    largest_number = 1
    pre_counter = 1  # chain length of the best start found so far
    counters = {1: 1}  # memo: start value -> chain length

    for start in range(2, lowerCAmelCase_):
        counter = 0
        number = start
        while True:
            if number in counters:
                # Rest of the chain is already known; add it and stop.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
    # NOTE(review): the original called an undefined name ``solution`` and carried
    # trailing non-Python artifact tokens; call the function defined above instead.
    print(lowerCAmelCase(int(input().strip())))
from __future__ import annotations
from math import pow, sqrt
def lowerCAmelCase(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Solve the series-circuit relation Z**2 = R**2 + X**2 for the missing quantity.

    Exactly one of the three arguments must be 0 (the unknown); returns a
    one-entry dict mapping the unknown's name to its value.

    Raises:
        ValueError: if the number of zero arguments is not exactly one.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : str =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
    """Configuration for a composite encoder-decoder model.

    Holds one ``encoder`` and one ``decoder`` sub-configuration; both must be
    supplied (as dicts) when instantiating.
    """

    # Read back in ``to_dict`` via ``self.__class__.model_type``.
    model_type = "encoder-decoder"
    # NOTE(review): upstream marks composite configs with ``is_composition = True``;
    # the obfuscated original clobbered both class attributes into one name — verify.
    is_composition = True

    def __init__(self, **kwargs):
        """Build the encoder/decoder sub-configs from the ``encoder``/``decoder`` kwargs."""
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''')
        encoder_model_type = encoder_config.pop('''model_type''')
        decoder_config = kwargs.pop('''decoder''')
        decoder_model_type = decoder_config.pop('''model_type''')

        # Imported lazily to avoid a circular import with the auto-config module.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        """Instantiate from two existing configs, flagging the decoder for cross-attention."""
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
'''simple docstring'''
import argparse
import os
import re
_UpperCAmelCase : Optional[Any] ="""src/diffusers"""
# Pattern that looks at the indentation in a line.
_UpperCAmelCase : Tuple =re.compile(R"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_UpperCAmelCase : Dict =re.compile(R"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_UpperCAmelCase : Dict =re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_UpperCAmelCase : Optional[Any] =re.compile(R"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_UpperCAmelCase : Tuple =re.compile(R"""\[([^\]]+)\]""")
def lowerCAmelCase(lowerCAmelCase_):
    """Return the leading whitespace of the first non-blank content of ``lowerCAmelCase_`` ("" if none)."""
    search = _re_indent.search(lowerCAmelCase_)
    return "" if search is None else search.groups()[0]
def lowerCAmelCase(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split ``code`` into a list of blocks delimited at ``indent_level``.

    When ``start_prompt``/``end_prompt`` are given, everything before/after them
    becomes its own block.  Returns the list of block strings.

    NOTE(review): ``get_indent`` below is the helper defined just above in this
    file (named ``lowerCAmelCase`` here) — verify the binding.
    """
    index = 0
    lines = code.split('''\n''')
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ['''\n'''.join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + ''' '''):
                current_block.append(lines[index])
                blocks.append('''\n'''.join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('''\n'''.join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append('''\n'''.join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append('''\n'''.join(lines[index:]))
    return blocks
def lowerCAmelCase(lowerCAmelCase_):
    """Wrap sort key ``lowerCAmelCase_`` so comparisons ignore case and underscores."""

    def _inner(obj):
        return lowerCAmelCase_(obj).lower().replace('''_''', '''''')

    return _inner
def lowerCAmelCase(objects, key=None):
    """Sort ``objects``: constants first, then classes, then functions, each
    group alphabetically ignoring case and underscores."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    # NOTE(review): ``ignore_underscore`` is the helper defined just above in this
    # file (named ``lowerCAmelCase`` here) — verify the binding.
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def lowerCAmelCase(lowerCAmelCase_):
    """Sort the object names inside one import statement (``lowerCAmelCase_`` is its full text)."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"""[{imports}]"""
        keys = [part.strip().replace('''"''', '''''') for part in imports.split(''',''')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(keys)]) + "]"

    lines = lowerCAmelCase_.split('''\n''')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '''[''' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('''"''', '''''') for part in lines[1].split(''',''')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ''', '''.join([f"""\"{k}\"""" for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        return _re_bracket_content.sub(_replace, lowerCAmelCase_)
def lowerCAmelCase(file, check_only=True):
    """Sort the ``_import_structure`` blocks of one custom ``__init__.py``.

    Returns True (without writing) when ``check_only`` and the file would change;
    otherwise rewrites the file in place.  Returns None when the file has no
    ``_import_structure``.
    """
    with open(file, '''r''') as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='''_import_structure = {''', end_prompt='''if TYPE_CHECKING:'''
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('''\n''')

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '''\n'''.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"""Overwriting {file}.""")
            with open(file, '''w''') as f:
                f.write('''\n'''.join(main_blocks))
def lowerCAmelCase(lowerCAmelCase_=True):
    """Sort the ``_import_structure`` of every ``__init__.py`` under ``src/diffusers``.

    ``lowerCAmelCase_`` is the ``check_only`` flag: when True, raise instead of rewriting.
    """
    failures = []
    # Walk the source tree (mirrors the PATH_TO_DIFFUSERS module constant).
    for root, _, files in os.walk("src/diffusers"):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '''__init__.py'''), check_only=lowerCAmelCase_)
            if result:
                # NOTE(review): plain assignment (not append) keeps only the last failing
                # file, matching the visible original — verify against upstream intent.
                failures = [os.path.join(root, '''__init__.py''')]
    if len(failures) > 0:
        raise ValueError(f"""Would overwrite {len(failures)} files, run `make style`.""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()

    # NOTE(review): the sorter above is named ``lowerCAmelCase`` in this file; upstream
    # calls it ``sort_imports_in_all_inits`` — verify the binding.
    sort_imports_in_all_inits(check_only=args.check_only)
from __future__ import annotations
from math import pi
def lowerCAmelCase(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Solve the inductive-reactance relation X_L = 2*pi*f*L for the missing quantity.

    Exactly one of the three arguments must be 0 (the unknown); returns a
    one-entry dict mapping the unknown's name to its value.

    Raises:
        ValueError: if the zero-count is not exactly one or any value is negative.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''', [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize('''input_in_memory_max_size''', ['''default''', 0, 100 * 2**20, 900 * 2**20])
def lowerCAmelCase(monkeypatch, dataset_size, input_in_memory_max_size):
    """Check that ``is_small_dataset`` honours ``datasets.config.IN_MEMORY_MAX_SIZE``.

    The parameter names must match the ``parametrize`` id strings so pytest can
    inject the fixtures (the obfuscated original duplicated one name — a SyntaxError).
    NOTE(review): pytest only collects functions named ``test_*``; verify the
    intended function name before relying on collection.
    """
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, '''IN_MEMORY_MAX_SIZE''', input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
    """Names of the supported learning-rate schedules.

    The string values are the identifiers accepted by the ``get_scheduler``
    factory; member names match the lookups in TYPE_TO_SCHEDULER_FUNCTION below.
    """

    LINEAR = """linear"""
    COSINE = """cosine"""
    COSINE_WITH_RESTARTS = """cosine_with_restarts"""
    POLYNOMIAL = """polynomial"""
    CONSTANT = """constant"""
    CONSTANT_WITH_WARMUP = """constant_with_warmup"""
    PIECEWISE_CONSTANT = """piecewise_constant"""
def lowerCAmelCase(optimizer, last_epoch=-1):
    """Create a schedule with a constant learning rate.

    Args:
        optimizer: wrapped ``torch.optim.Optimizer``.
        last_epoch: index of the last epoch when resuming training.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def lowerCAmelCase(optimizer, num_warmup_steps, last_epoch=-1):
    """Constant LR after a linear warmup over ``num_warmup_steps`` steps."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def lowerCAmelCase(optimizer, step_rules, last_epoch=-1):
    """Piecewise-constant LR multiplier parsed from ``step_rules``.

    ``step_rules`` looks like ``"1:0.5,10:0.1,0.01"``: multiplier 0.5 before step 1,
    0.1 before step 10, then 0.01 for all remaining steps.
    """
    rules_dict = {}
    rule_list = step_rules.split(''',''')
    for rule_str in rule_list[:-1]:
        value_str, lr_ratio_str = rule_str.split(''':''')
        steps = int(value_str)
        lr_ratio = float(lr_ratio_str)
        rules_dict[steps] = lr_ratio
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def lowerCAmelCase(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup to the base LR, then linear decay to 0 at ``num_training_steps``."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def lowerCAmelCase(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    """Linear warmup, then cosine decay; ``num_cycles=0.5`` decays from max LR to 0."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def lowerCAmelCase(optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1):
    """Linear warmup, then cosine decay with ``num_cycles`` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def lowerCAmelCase(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    """Linear warmup, then polynomial decay from the optimizer's base LR down to ``lr_end``.

    Raises:
        ValueError: if ``lr_end`` is not strictly below the optimizer's initial LR.
    """
    lr_init = optimizer.defaults['''lr''']
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""")

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Maps each SchedulerType member to the factory that builds its LambdaLR schedule.
# NOTE(review): the factory defs above are all named ``lowerCAmelCase`` in this file;
# the names referenced below match the upstream helpers — verify the bindings resolve.
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCAmelCase(
    name,
    optimizer,
    step_rules=None,
    num_warmup_steps=None,
    num_training_steps=None,
    num_cycles=1,
    power=1.0,
    last_epoch=-1,
):
    """Unified factory: build the LR schedule identified by ``name``.

    ``name`` may be a SchedulerType member or its string value; only the
    keyword arguments relevant to the chosen schedule are forwarded.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
# Table style for Slack-friendly markdown code blocks (pipes only, no rules).
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("""""", """|""", """|"""),
    datarow=DataRow("""""", """|""", """|"""),
    padding=1,
    with_header_hide=None,
)

failed = []  # rows [test, duration, framework] for the log currently being parsed
group_info = []  # one [log_name, num_failed, failed_rows] entry per *.log file
no_error_payload = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}

# Slack "blocks" payload; starts with the report header.
payload = [
    {
        """type""": """header""",
        """text""": {
            """type""": """plain_text""",
            """text""": f"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""",
            """emoji""": True,
        },
    }
]

total_num_failed = 0
# Parse every pytest-reportlog file in the working directory, collecting failures.
for log in Path().glob("""*.log"""):
    section_num_failed = 0
    with open(log, """r""") as f:
        for line in f:
            line = json.loads(line)
            if line.get("""nodeid""", """""") != "":
                test = line["""nodeid"""]
                if line.get("""duration""", None) is not None:
                    duration = f"""{line['duration']:.4f}"""
                    if line.get("""outcome""", """""") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("""_""")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []  # reset the per-log accumulator
    log.unlink()  # each log is consumed exactly once
# Build the Slack message body from the collected failures.
message = """"""
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}  # file name -> list of [class, test] rows
            for test in failed_tests:
                data = test[0].split("""::""")
                data[0] = data[0].split("""/""")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])

            failed_table = tabulate(
                table,
                headers=["""Test Location""", """Num Failed"""],
                tablefmt=hf_table_format,
                stralign="""right""",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        # Slack section blocks are limited; truncate and point at the Action log.
        err = """Too many failed tests, please see the full report in the Action results."""
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"""\n...\n```\n{err}"""
    print(f"""### {message}""")
else:
    message = """No failed tests! 🤗"""
    print(f"""## {message}""")
    payload.append(no_error_payload)
# Only post to Slack when running in CI (TEST_TYPE is set).
if os.environ.get("""TEST_TYPE""", """""") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
    if message != "No failed tests! 🤗":
        md_report = {
            """type""": """section""",
            """text""": {
                """type""": """mrkdwn""",
                """text""": message,
            },
        }
        payload.append(md_report)
        action_button = {
            """type""": """section""",
            """text""": {
                """type""": """mrkdwn""",
                """text""": """*For more details:*""",
            },
            """accessory""": {
                """type""": """button""",
                """text""": {
                    """type""": """plain_text""",
                    """text""": """Check Action results""",
                    """emoji""": True,
                },
                """url""": f"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
            },
        }
        payload.append(action_button)
        date_report = {
            """type""": """context""",
            """elements""": [
                {
                    """type""": """plain_text""",
                    """text""": f"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""",
                }
            ],
        }
        payload.append(date_report)
        response = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
        ts = response.data["""ts"""]
        for failed_file in all_filesafailed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                test_class = """"""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = """"""
                payload = {
                    """type""": """section""",
                    """text""": {
                        """type""": """mrkdwn""",
                        """text""": f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""",
                    },
                }
                # Post each file's failures as a threaded reply under the main report.
                client.chat_postMessage(
                    channel="""#accelerate-ci-daily""",
                    thread_ts=ts,
                    blocks=[payload],
                )
from __future__ import annotations
def lowerCAmelCase(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
    """Solve the mass-action law n_i**2 = n * p for the missing concentration.

    Exactly one of the three arguments must be 0 (the unknown); returns a
    ``(name, value)`` tuple naming the solved quantity.

    Raises:
        ValueError: if the zero-count is not exactly one or any value is negative.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        # Unreachable: one of the branches above always matches; kept for parity.
        return (-1, -1)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
from math import ceil
def lowerCAmelCase(lowerCAmelCase_=1_001) -> int:
    """Project Euler 28: sum of the diagonals of an n-by-n number spiral (n odd)."""
    n = lowerCAmelCase_
    total = 1  # the centre cell
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        # The four corners of ring i sum to 4*odd**2 - 6*even.
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
    import sys

    # NOTE(review): the original called an undefined name ``solution`` and carried
    # trailing non-Python artifact tokens; call the function defined above instead.
    if len(sys.argv) == 1:
        print(lowerCAmelCase())
    else:
        try:
            n = int(sys.argv[1])
            print(lowerCAmelCase(n))
        except ValueError:
            print("""Invalid entry - please enter a number""")
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# Repo-relative path to the transformers sources; everything below is resolved against it.
PATH_TO_TRANSFORMERS = """src/transformers"""

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")

# Config classes exempt from the docstring-checkpoint requirement.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    """DecisionTransformerConfig""",
    """EncoderDecoderConfig""",
    """MusicgenConfig""",
    """RagConfig""",
    """SpeechEncoderDecoderConfig""",
    """TimmBackboneConfig""",
    """VisionEncoderDecoderConfig""",
    """VisionTextDualEncoderConfig""",
    """LlamaConfig""",
}
def lowerCAmelCase(lowerCAmelCase_):
    """Return the first checkpoint name in ``lowerCAmelCase_``'s docstring whose
    link matches ``https://huggingface.co/<name>``, or None if there is none."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(lowerCAmelCase_)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('''/'''):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def lowerCAmelCase():
    """Raise if any config class docstring lacks a valid checkpoint link (modulo the ignore list).

    NOTE(review): ``get_checkpoint_from_config_class`` is the helper defined just
    above in this file (named ``lowerCAmelCase`` here) — verify the binding.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint))
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""")
if __name__ == "__main__":
    # NOTE(review): the original called an undefined name
    # ``check_config_docstrings_have_checkpoints`` and carried trailing artifact
    # tokens; the checker defined above is named ``lowerCAmelCase`` in this file.
    lowerCAmelCase()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure for the falcon sub-package: names are resolved on first access.
_import_structure = {
    """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exposed when torch is installed.
    _import_structure["modeling_falcon"] = [
        """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """FalconForCausalLM""",
        """FalconModel""",
        """FalconPreTrainedModel""",
        """FalconForSequenceClassification""",
        """FalconForTokenClassification""",
        """FalconForQuestionAnswering""",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
# Silence torch's "scheduler stepped before optimizer" UserWarning for this module only.
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class snake_case__:
    """Wrapper around a torch LR scheduler that only steps it when the wrapped
    optimizer(s) actually performed a step, making it safe under gradient
    accumulation and multi-process training.

    NOTE(review): the obfuscated original defined every method under a single
    duplicated name (clobbering all but the last) and gave ``__init__`` four
    identically named parameters (a SyntaxError); names below follow the
    behaviour each method delegates to — verify against callers.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer=True, split_batches=False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        """Step the underlying scheduler, honouring gradient accumulation and process count."""
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, '''total_steps'''):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Plain passthroughs to the wrapped scheduler.
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
# Registry of released dance-diffusion checkpoints: download URL plus the
# audio sample rate (Hz) and window length (samples) each model was trained on.
_UpperCAmelCase : int ={
    """gwf-440k""": {
        """url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""",
        """sample_rate""": 4_8000,
        """sample_size""": 6_5536,
    },
    """jmann-small-190k""": {
        """url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""",
        """sample_rate""": 4_8000,
        """sample_size""": 6_5536,
    },
    """jmann-large-580k""": {
        """url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""",
        """sample_rate""": 4_8000,
        """sample_size""": 13_1072,
    },
    """maestro-uncond-150k""": {
        """url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""",
        """sample_rate""": 1_6000,
        """sample_size""": 6_5536,
    },
    """unlocked-uncond-250k""": {
        """url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""",
        """sample_rate""": 1_6000,
        """sample_size""": 6_5536,
    },
    """honk-140k""": {
        """url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""",
        """sample_rate""": 1_6000,
        """sample_size""": 6_5536,
    },
}
def lowerCAmelCase ( alpha , sigma ):
    """Convert (alpha, sigma) diffusion scaling factors to a continuous timestep in [0, 1].

    Fix: the original called the nonexistent ``torch.atana`` and declared two
    parameters with the same name (a SyntaxError); restored to
    ``torch.atan2(sigma, alpha) / pi * 2`` as in the upstream
    sample-generator utility.
    """
    return torch.atan2(sigma , alpha ) / math.pi * 2
def lowerCAmelCase ( t ):
    """Map a linear timestep schedule ``t`` in [0, 1] onto the "crash" noise schedule.

    Fix: the original body read an undefined name ``t`` (the parameter had been
    renamed) and returned via ``alpha_sigma_to_t``, which no longer exists in
    this module; the helper is inlined so the function is self-contained.
    """
    # Noise level grows as sin^2; signal level is the complementary sqrt term.
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    # Inlined alpha_sigma_to_t: atan2(sigma, alpha) / pi * 2.
    return torch.atan2(sigma , alpha ) / math.pi * 2
class snake_case__( UpperCAmelCase__ ):
    '''Empty attribute container used by the conversion entry point as an
    ad-hoc namespace for global args (sample_size / sample_rate / latent_dim).
    NOTE(review): the base-class name appears obfuscated; upstream this is
    simply ``class Object(object)``.'''
    pass
class snake_case__( nn.Module ):
    '''Training wrapper holding the diffusion U-Net, its EMA copy and a Sobol
    quasi-random sampler (mirrors sample-generator's DiffusionUncond).'''
    def __init__( self , __lowercase ) -> Union[str, Any]:
        super().__init__()
        # NOTE(review): the attribute targets below were obfuscated; upstream
        # they are self.diffusion, self.diffusion_ema and self.rng, and the
        # Sobol engine is created with scramble=True — confirm before use.
        lowerCAmelCase_ : Tuple = DiffusionAttnUnetaD(__lowercase , n_attn_layers=4 )
        lowerCAmelCase_ : Tuple = deepcopy(self.diffusion )
        lowerCAmelCase_ : List[str] = torch.quasirandom.SobolEngine(1 , scramble=__lowercase )
def lowerCAmelCase ( model_name ):
    """Download the named pretrained checkpoint into the CWD and return its local path.

    Fix: the body referenced ``model_name`` and ``url`` while the parameter and
    the local had been obfuscated to other names (NameError on every call);
    the original names are restored.

    NOTE(review): ``os.system`` interpolates ``url`` into a shell command;
    inputs are limited to MODELS_MAP keys today, but prefer
    ``subprocess.run([...], shell=False)`` if they ever become untrusted.
    """
    url = MODELS_MAP[model_name]['''url''']
    os.system(f"""wget {url} ./""" )
    return f"""./{model_name}.ckpt"""
# NOTE(review): obfuscation collapsed six distinct mapping constants
# (DOWN_NUM_TO_LAYER, UP_NUM_TO_LAYER, MID_NUM_TO_LAYER, DEPTH_0_TO_LAYER,
# RES_CONV_MAP, ATTN_MAP) into one repeatedly-reassigned name, so only the
# last dict survives at runtime — restore distinct names before use.
# Down-path: original layer index -> diffusers sub-block name.
_UpperCAmelCase : Dict ={
    """1""": """resnets.0""",
    """2""": """attentions.0""",
    """3""": """resnets.1""",
    """4""": """attentions.1""",
    """5""": """resnets.2""",
    """6""": """attentions.2""",
}
# Up-path: original layer index -> diffusers sub-block name.
_UpperCAmelCase : int ={
    """8""": """resnets.0""",
    """9""": """attentions.0""",
    """10""": """resnets.1""",
    """11""": """attentions.1""",
    """12""": """resnets.2""",
    """13""": """attentions.2""",
}
# Mid-block: original layer index -> diffusers sub-block name.
_UpperCAmelCase : Tuple ={
    """1""": """resnets.0""",
    """2""": """attentions.0""",
    """3""": """resnets.1""",
    """4""": """attentions.1""",
    """5""": """resnets.2""",
    """6""": """attentions.2""",
    """8""": """resnets.3""",
    """9""": """attentions.3""",
    """10""": """resnets.4""",
    """11""": """attentions.4""",
    """12""": """resnets.5""",
    """13""": """attentions.5""",
}
# Depth-0 layers (outermost blocks, no attention).
_UpperCAmelCase : Dict ={
    """0""": """resnets.0""",
    """1""": """resnets.1""",
    """2""": """resnets.2""",
    """4""": """resnets.0""",
    """5""": """resnets.1""",
    """6""": """resnets.2""",
}
# ResConvBlock sub-module renames (source prefix -> diffusers name).
_UpperCAmelCase : Tuple ={
    """skip""": """conv_skip""",
    """main.0""": """conv_1""",
    """main.1""": """group_norm_1""",
    """main.3""": """conv_2""",
    """main.4""": """group_norm_2""",
}
# Attention sub-module renames; a list value means one fused tensor splits
# into several diffusers tensors (e.g. qkv -> query/key/value).
_UpperCAmelCase : Optional[int] ={
    """norm""": """group_norm""",
    """qkv_proj""": ["""query""", """key""", """value"""],
    """out_proj""": ["""proj_attn"""],
}
def lowerCAmelCase ( name ):
    """Translate a sample-generator ResConvBlock weight name into diffusers naming.

    Fix: the body referenced ``name`` while the parameter had been obfuscated
    to a different identifier, raising NameError on every call; the parameter
    name is restored.

    Raises:
        ValueError: if ``name`` is neither a ``skip`` nor a ``main.{digit}`` entry.
    """
    if name.startswith('''skip''' ):
        return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
    # name has to be of format main.{digit}
    if not name.startswith('''main.''' ):
        raise ValueError(f"""ResConvBlock error with {name}""" )
    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def lowerCAmelCase ( name ):
    """Translate an attention-block weight name via ATTN_MAP.

    Returns a single renamed key, or a list of keys when one original tensor
    maps to several diffusers tensors (e.g. a fused qkv projection).

    Fix: the obfuscated body tested ``isinstance(name, name)`` (a TypeError at
    runtime) and performed no-op ``name.replace(name, name)`` calls; restored
    to iterate over ATTN_MAP's (key, value) pairs as in the upstream
    conversion script.

    Raises:
        ValueError: if no ATTN_MAP prefix matches ``name``.
    """
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(f"""Attn error with {name}""" )
# NOTE(review): heavily obfuscation-damaged — the signature repeats one
# parameter name (SyntaxError; upstream is ``rename(input_string, max_depth=13)``),
# the body reads never-bound names (``string``, ``depth`` ...), and it calls
# ``convert_resconv_naming`` / ``convert_attn_naming`` which do not exist under
# those names in this module. Kept byte-identical; restore upstream names before use.
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_=13 ):
    # Maps one original state-dict key onto the diffusers naming scheme by
    # peeling prefixes to compute the block depth, then dispatching on layer type.
    lowerCAmelCase_ : Dict = input_string
    if string.split('''.''' )[0] == "timestep_embed":
        return string.replace('''timestep_embed''' , '''time_proj''' )
    lowerCAmelCase_ : List[Any] = 0
    if string.startswith('''net.3.''' ):
        depth += 1
        lowerCAmelCase_ : Optional[Any] = string[6:]
    elif string.startswith('''net.''' ):
        lowerCAmelCase_ : str = string[4:]
    while string.startswith('''main.7.''' ):
        depth += 1
        lowerCAmelCase_ : Dict = string[7:]
    if string.startswith('''main.''' ):
        lowerCAmelCase_ : Any = string[5:]
    # mid block
    if string[:2].isdigit():
        lowerCAmelCase_ : Optional[int] = string[:2]
        lowerCAmelCase_ : Union[str, Any] = string[2:]
    else:
        lowerCAmelCase_ : Any = string[0]
        lowerCAmelCase_ : List[str] = string[1:]
    if depth == max_depth:
        lowerCAmelCase_ : List[Any] = MID_NUM_TO_LAYER[layer_num]
        lowerCAmelCase_ : int = '''mid_block'''
    elif depth > 0 and int(lowerCAmelCase_ ) < 7:
        lowerCAmelCase_ : Dict = DOWN_NUM_TO_LAYER[layer_num]
        lowerCAmelCase_ : int = f"""down_blocks.{depth}"""
    elif depth > 0 and int(lowerCAmelCase_ ) > 7:
        lowerCAmelCase_ : List[str] = UP_NUM_TO_LAYER[layer_num]
        lowerCAmelCase_ : Union[str, Any] = f"""up_blocks.{max_depth - depth - 1}"""
    elif depth == 0:
        lowerCAmelCase_ : str = DEPTH_0_TO_LAYER[layer_num]
        lowerCAmelCase_ : str = f"""up_blocks.{max_depth - 1}""" if int(lowerCAmelCase_ ) > 3 else '''down_blocks.0'''
    if not string_left.startswith('''.''' ):
        raise ValueError(f"""Naming error with {input_string} and string_left: {string_left}.""" )
    lowerCAmelCase_ : Tuple = string_left[1:]
    if "resnets" in new_layer:
        lowerCAmelCase_ : Tuple = convert_resconv_naming(lowerCAmelCase_ )
    elif "attentions" in new_layer:
        lowerCAmelCase_ : int = convert_attn_naming(lowerCAmelCase_ )
        lowerCAmelCase_ : int = new_string_left
    if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
        lowerCAmelCase_ : Optional[int] = prefix + '''.''' + new_layer + '''.''' + string_left
    else:
        lowerCAmelCase_ : str = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
    return new_string
# NOTE(review): obfuscation-damaged — the body reads never-bound names
# (``state_dict``, ``k``, ``v`` targets were renamed) and calls ``rename`` /
# ``transform_conv_attns`` which do not exist under those names here.
# Kept byte-identical; restore upstream names before use.
def lowerCAmelCase ( lowerCAmelCase_ ):
    # Builds a new state dict with diffusers-style keys, dropping the
    # non-trainable up/downsample kernels and splitting fused attention tensors.
    lowerCAmelCase_ : Optional[int] = {}
    for k, v in state_dict.items():
        if k.endswith('''kernel''' ):
            # up- and downsample layers, don't have trainable weights
            continue
        lowerCAmelCase_ : Any = rename(lowerCAmelCase_ )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
            lowerCAmelCase_ : Optional[Any] = transform_conv_attns(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
        else:
            lowerCAmelCase_ : List[str] = v
    return new_state_dict
def lowerCAmelCase ( new_state_dict , name , v ):
    """Copy tensor ``v`` into ``new_state_dict`` under the key(s) in ``name``,
    squeezing the trailing conv dimension so attention conv weights become
    linear weights.

    ``name`` holds one target key (plain projection) or three keys (a fused
    q/k/v projection, split evenly along dim 0). Returns the dict.

    Fix: the original declared three identically-named parameters (a
    SyntaxError) and assigned every slice to a throwaway local; parameter and
    target names restored from the upstream conversion script.
    """
    if len(name ) == 1:
        if len(v.shape ) == 3:
            # weight: drop the trailing kernel dim of the 1x1 conv
            new_state_dict[name[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[name[0]] = v
    else:
        # qkv matrices: split the fused tensor into three equal chunks
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
# NOTE(review): conversion entry point (upstream ``main(args)``). Obfuscation
# renamed every local assignment target, so the body reads never-bound names
# (``model_name``, ``orig_model``, ``pipe``, ...) and calls ``download`` /
# ``rename_orig_weights`` / ``get_crash_schedule`` which exist here only under
# the collided name ``lowerCAmelCase``. Kept byte-identical; restore upstream
# names before running.
def lowerCAmelCase ( lowerCAmelCase_ ):
    # Pick device and derive the model name from the checkpoint path.
    lowerCAmelCase_ : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    lowerCAmelCase_ : List[Any] = args.model_path.split('''/''' )[-1].split('''.''' )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), f"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
        lowerCAmelCase_ : Optional[int] = download(lowerCAmelCase_ )
    # Build a diffusers UNet with the checkpoint's sample rate/size.
    lowerCAmelCase_ : Any = MODELS_MAP[model_name]['''sample_rate''']
    lowerCAmelCase_ : Union[str, Any] = MODELS_MAP[model_name]['''sample_size''']
    lowerCAmelCase_ : Dict = Object()
    lowerCAmelCase_ : List[Any] = sample_size
    lowerCAmelCase_ : Any = sample_rate
    lowerCAmelCase_ : str = 0
    lowerCAmelCase_ : Optional[Any] = UNetaDModel(sample_size=lowerCAmelCase_ , sample_rate=lowerCAmelCase_ )
    lowerCAmelCase_ : Optional[int] = diffusers_model.state_dict()
    # Load the original model and port its EMA weights into the diffusers model.
    lowerCAmelCase_ : Optional[Any] = DiffusionUncond(lowerCAmelCase_ )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCAmelCase_ )['''state_dict'''] )
    lowerCAmelCase_ : Optional[Any] = orig_model.diffusion_ema.eval()
    lowerCAmelCase_ : Tuple = orig_model.state_dict()
    lowerCAmelCase_ : Tuple = rename_orig_weights(lowerCAmelCase_ )
    # Sanity-check that the two state dicts cover each other (except kernels).
    lowerCAmelCase_ : List[Any] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    lowerCAmelCase_ : str = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(lowerCAmelCase_ ) == 0, f"""Problem with {renamed_minus_diffusers}"""
    assert all(k.endswith('''kernel''' ) for k in list(lowerCAmelCase_ ) ), f"""Problem with {diffusers_minus_renamed}"""
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
        if key == "time_proj.weight":
            lowerCAmelCase_ : Tuple = value.squeeze()
        lowerCAmelCase_ : int = value
    diffusers_model.load_state_dict(lowerCAmelCase_ )
    # Run both pipelines on the same seed and compare the generated audio.
    lowerCAmelCase_ : str = 100
    lowerCAmelCase_ : Optional[Any] = 33
    lowerCAmelCase_ : List[str] = IPNDMScheduler(num_train_timesteps=lowerCAmelCase_ )
    lowerCAmelCase_ : Optional[int] = torch.manual_seed(lowerCAmelCase_ )
    lowerCAmelCase_ : Optional[Any] = torch.randn([1, 2, config.sample_size] , generator=lowerCAmelCase_ ).to(lowerCAmelCase_ )
    lowerCAmelCase_ : str = torch.linspace(1 , 0 , steps + 1 , device=lowerCAmelCase_ )[:-1]
    lowerCAmelCase_ : List[Any] = get_crash_schedule(lowerCAmelCase_ )
    lowerCAmelCase_ : Optional[int] = DanceDiffusionPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
    lowerCAmelCase_ : Tuple = torch.manual_seed(33 )
    lowerCAmelCase_ : List[Any] = pipe(num_inference_steps=lowerCAmelCase_ , generator=lowerCAmelCase_ ).audios
    lowerCAmelCase_ : List[str] = sampling.iplms_sample(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {} )
    lowerCAmelCase_ : Optional[int] = generated.clamp(-1 , 1 )
    lowerCAmelCase_ : Tuple = (generated - audio).abs().sum()
    lowerCAmelCase_ : Any = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print('''Diff sum''' , lowerCAmelCase_ )
    print('''Diff max''' , lowerCAmelCase_ )
    assert diff_max < 1e-3, f"""Diff max: {diff_max} is too much :-/"""
    print(f"""Conversion for {model_name} successful!""" )
# CLI entry point: --model_path (required), --checkpoint_path (required),
# --save (default True).
if __name__ == "__main__":
    _UpperCAmelCase : List[str] =argparse.ArgumentParser()
    parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
    parser.add_argument(
        """--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
    )
    parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    # NOTE(review): the parser/args locals were renamed to `_UpperCAmelCase`,
    # so `parser`, `args` and `main` are undefined here — restore before running.
    _UpperCAmelCase : Dict =parser.parse_args()
    main(args)
from manim import *
class snake_case__( UpperCAmelCase__ ):
    '''Manim scene animating how checkpoint weights are staged across CPU, GPU
    and disk during big-model loading (an Accelerate documentation animation).'''
    def lowercase_ ( self ) -> Tuple:
        # Base rectangles: full-size memory cell, small cell, and fill overlay.
        lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU: two columns of six memory cells.
        lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 )
        lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__lowercase )
        # GPU: a single row of four memory cells.
        lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(__lowercase )
        # Model: a row of six memory cells.
        lowerCAmelCase_ : str = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 )
        lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(__lowercase )
        # Overlay fill targets showing where each model shard lives on CPU.
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : Dict = []
        for i, rect in enumerate(__lowercase ):
            rect.set_stroke(__lowercase )
            lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
            self.add(__lowercase )
            model_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase , *__lowercase )
        # Loaded checkpoint: six cells placed above the model.
        lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__lowercase )
        # Checkpoint fills and their CPU destinations.
        lowerCAmelCase_ : Optional[Any] = []
        lowerCAmelCase_ : Dict = []
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 )
            target.move_to(__lowercase )
            ckpt_arr.append(__lowercase )
            lowerCAmelCase_ : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase )
        # Legend explaining the color coding.
        lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase_ : str = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__lowercase , __lowercase )
        lowerCAmelCase_ : str = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
        blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__lowercase )
        # Step 1 caption + disk layout, then animate shards moving to disk.
        lowerCAmelCase_ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 )
        lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
        lowerCAmelCase_ : int = []
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : int = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
        self.play(*__lowercase )
        self.play(FadeOut(__lowercase ) )
        # Step 2 caption: checkpoint is garbage-collected from memory.
        lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__lowercase , run_time=3 ) )
        self.play(
            FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
        self.wait()
import math
def lowerCAmelCase ( number )-> bool:
    """Return True iff ``number`` is prime, by trial division over 6k±1 candidates.

    Fix: the body referenced ``number`` while the parameter had been obfuscated
    to a different name, raising NameError on every call; the parameter name is
    restored. Handles negatives, 0 and 1 (all non-prime).
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def _is_prime(number )-> bool:
    # Trial division over 6k±1 candidates; local helper because the module's
    # primality test lost its ``is_prime`` name to obfuscation.
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def lowerCAmelCase ( ratio = 0.1 )-> int:
    """Return the smallest odd square-spiral side length ``j`` for which the
    fraction of primes on the spiral's diagonals drops below ``ratio``
    (Project Euler problem 58 style).

    Fix: the body referenced ``ratio``/``primes``/``j`` while every binding had
    been obfuscated away, and it called a nonexistent ``is_prime``; names are
    restored and the primality test is provided as a private helper.
    """
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        # Count primes on the three new diagonal corners of the next ring
        # (the fourth corner is the perfect square (j+2)^2, never prime).
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += _is_prime(i )
        j += 2
    return j
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
# Value/symbol pairs in descending order, including the subtractive forms
# (CM, CD, XC, XL, IX, IV), consumed greedily by the int->Roman converter.
_UpperCAmelCase : Dict =[
    (1000, """M"""),
    (900, """CM"""),
    (500, """D"""),
    (400, """CD"""),
    (100, """C"""),
    (90, """XC"""),
    (50, """L"""),
    (40, """XL"""),
    (10, """X"""),
    (9, """IX"""),
    (5, """V"""),
    (4, """IV"""),
    (1, """I"""),
]
def lowerCAmelCase ( roman )-> int:
    """Convert a Roman-numeral string (e.g. "MCMXCIV") to its integer value.

    Scans left to right; a smaller value preceding a larger one forms a
    subtractive pair (IV, IX, XL, ...) and consumes two symbols at once.

    Fix: the obfuscated body referenced ``vals``/``total``/``place``/``roman``
    while the parameter and every assignment target had been renamed, raising
    NameError; the original local names are restored.
    """
    vals = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000}
    total = 0
    place = 0
    while place < len(roman ):
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def lowerCAmelCase ( number )-> str:
    """Convert a positive integer to its Roman-numeral string using the greedy
    descending (value, symbol) table ``ROMAN`` defined at module level.

    Fix: the obfuscated body collapsed ``result``/``factor``/``number`` into a
    single renamed identifier, making every reference undefined; the original
    local names are restored.
    """
    result = []
    for arabic, roman in ROMAN:
        # factor = how many times this symbol fits; number = the remainder.
        (factor), (number) = divmod(number , arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
# NOTE(review): the logger was renamed to `_UpperCAmelCase`, but the functions
# below call `logger.info(...)` — restore the name `logger` before running.
_UpperCAmelCase : Union[str, Any] =logging.get_logger(__name__)
# fairseq WavLM parameter-name prefix -> HF WavLMModel parameter-name prefix
# ("*" is replaced by the encoder layer index at conversion time).
_UpperCAmelCase : Optional[Any] ={
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
    """self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
    """self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """ctc_proj""",
    """mask_emb""": """masked_spec_embed""",
}
# Keys assigned directly on the top-level model rather than via weight_type.
_UpperCAmelCase : Optional[int] =[
    """ctc_proj""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
]
def lowerCAmelCase ( hf_pointer , key , value , full_name , weight_type )-> Optional[Any]:
    """Assign ``value`` into the HF module reached by walking dotted ``key``
    from ``hf_pointer``, on the slot selected by ``weight_type``
    ("weight" / "weight_g" / "weight_v" / "bias", or None for a bare tensor).

    ``full_name`` is only used in the assert/log messages.

    Fix: the original declared five identically-named parameters (a
    SyntaxError) and assigned every tensor to a throwaway local; signature and
    assignments restored from the standard HF checkpoint-conversion
    boilerplate (parameter order follows upstream — confirm at call sites).
    """
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
# NOTE(review): obfuscation-damaged — the signature repeats one parameter name
# (SyntaxError; upstream is ``recursively_load_weights(fairseq_model, hf_model)``),
# locals were renamed so the body reads never-bound names, and it calls
# ``load_conv_layer`` / ``set_recursively`` which exist here only under the
# collided name ``lowerCAmelCase``. Kept byte-identical; restore before use.
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]:
    # Walk the fairseq state dict and route each tensor into the HF model:
    # conv-feature-extractor tensors via load_conv_layer, everything else via
    # the MAPPING prefix table; unmatched tensors are collected and logged.
    lowerCAmelCase_ : Dict = []
    lowerCAmelCase_ : str = fairseq_model.state_dict()
    lowerCAmelCase_ : Optional[Any] = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        lowerCAmelCase_ : Optional[int] = False
        if "conv_layers" in name:
            load_conv_layer(
                lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == '''group''' , )
            lowerCAmelCase_ : Tuple = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    lowerCAmelCase_ : Any = True
                    if "*" in mapped_key:
                        # Substitute the encoder layer index into the mapped key.
                        lowerCAmelCase_ : str = name.split(lowerCAmelCase_ )[0].split('''.''' )[-2]
                        lowerCAmelCase_ : str = mapped_key.replace('''*''' , lowerCAmelCase_ )
                    if "weight_g" in name:
                        lowerCAmelCase_ : Optional[int] = '''weight_g'''
                    elif "weight_v" in name:
                        lowerCAmelCase_ : Union[str, Any] = '''weight_v'''
                    elif "bias" in name and "relative_attention_bias" not in name:
                        lowerCAmelCase_ : Optional[Any] = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        lowerCAmelCase_ : Tuple = '''weight'''
                    else:
                        lowerCAmelCase_ : int = None
                    set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
                    continue
            if not is_used:
                unused_weights.append(lowerCAmelCase_ )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCAmelCase ( full_name , value , feature_extractor , unused_weights , use_group_norm )-> Optional[int]:
    """Route a fairseq ``conv_layers.*`` tensor into the HF feature extractor.

    type_id 0 targets the conv weight/bias of layer ``layer_id``; type_id 2
    targets the layer-norm weight/bias (only layer 0 when group-norm is used).
    Tensors matching neither case are appended to ``unused_weights``.

    Fix: the original declared five identically-named parameters (a
    SyntaxError) and assigned every tensor to a throwaway local instead of the
    model; restored from the standard HF wav2vec-style conversion boilerplate
    (parameter order follows upstream — confirm at call sites).
    """
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
# NOTE(review): obfuscation-damaged — the signature repeats one parameter name
# (SyntaxError; upstream is ``convert_wavlm_checkpoint(checkpoint_path,
# pytorch_dump_folder_path, config_path=None)``) and the body reads the
# never-bound upstream locals. Kept byte-identical; restore before running.
@torch.no_grad()
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None )-> Tuple:
    # load the pre-trained checkpoints
    lowerCAmelCase_ : Any = torch.load(lowerCAmelCase_ )
    lowerCAmelCase_ : Dict = WavLMConfigOrig(checkpoint['''cfg'''] )
    lowerCAmelCase_ : Union[str, Any] = WavLMOrig(lowerCAmelCase_ )
    model.load_state_dict(checkpoint['''model'''] )
    model.eval()
    # Build the HF config (from file when given, defaults otherwise) and port weights.
    if config_path is not None:
        lowerCAmelCase_ : Any = WavLMConfig.from_pretrained(lowerCAmelCase_ )
    else:
        lowerCAmelCase_ : Tuple = WavLMConfig()
    lowerCAmelCase_ : Optional[Any] = WavLMModel(lowerCAmelCase_ )
    recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ )
    hf_wavlm.save_pretrained(lowerCAmelCase_ )
# CLI entry point: --checkpoint_path, --pytorch_dump_folder_path, --config_path.
if __name__ == "__main__":
    _UpperCAmelCase : List[str] =argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    # NOTE(review): `parser`/`args`/`convert_wavlm_checkpoint` were renamed to
    # obfuscated names above, so these references are undefined — restore first.
    _UpperCAmelCase : List[Any] =parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
import csv
import tweepy
# Twitter API credentials
# NOTE(review): fill these in before running; the four constants were all
# renamed to `_UpperCAmelCase` (upstream: consumer_key, consumer_secret,
# access_key, access_secret), so only the last assignment survives.
_UpperCAmelCase : int =""""""
_UpperCAmelCase : Optional[int] =""""""
_UpperCAmelCase : Dict =""""""
_UpperCAmelCase : str =""""""
# NOTE(review): obfuscation renamed every local binding, so the body reads
# never-bound names (``auth``, ``api``, ``alltweets``, ``oldest``, ...) and the
# credential constants above. Kept byte-identical; restore names before use.
def lowerCAmelCase ( lowerCAmelCase_ )-> None:
    # Downloads a user's recent timeline (Twitter caps history at ~3200 tweets)
    # and writes id/created_at/text rows to new_{screen_name}_tweets.csv.
    # authorize twitter, initialize tweepy
    lowerCAmelCase_ : Optional[int] = tweepy.OAuthHandler(lowerCAmelCase_ , lowerCAmelCase_ )
    auth.set_access_token(lowerCAmelCase_ , lowerCAmelCase_ )
    lowerCAmelCase_ : Any = tweepy.API(lowerCAmelCase_ )
    # initialize a list to hold all the tweepy Tweets
    lowerCAmelCase_ : Dict = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    lowerCAmelCase_ : Optional[int] = api.user_timeline(screen_name=lowerCAmelCase_ , count=200 )
    # save most recent tweets
    alltweets.extend(lowerCAmelCase_ )
    # save the id of the oldest tweet less one
    lowerCAmelCase_ : str = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(lowerCAmelCase_ ) > 0:
        print(f"""getting tweets before {oldest}""" )
        # all subsequent requests use the max_id param to prevent duplicates
        lowerCAmelCase_ : Optional[Any] = api.user_timeline(
            screen_name=lowerCAmelCase_ , count=200 , max_id=lowerCAmelCase_ )
        # save most recent tweets
        alltweets.extend(lowerCAmelCase_ )
        # update the id of the oldest tweet less one
        lowerCAmelCase_ : Optional[Any] = alltweets[-1].id - 1
        print(f"""...{len(lowerCAmelCase_ )} tweets downloaded so far""" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    lowerCAmelCase_ : Union[str, Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"""new_{screen_name}_tweets.csv""" , '''w''' ) as f:
        lowerCAmelCase_ : Optional[int] = csv.writer(lowerCAmelCase_ )
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(lowerCAmelCase_ )
if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("""FirePing32""")
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class snake_case__( unittest.TestCase ):
'''simple docstring'''
    def lowercase_ ( self , __lowercase , __lowercase ) -> Tuple:
        # Build the cached-fixture filename for a (seed, shape) pair, e.g.
        # "gaussian_noise_s=83_shape=4_4_64_64.npy".
        # NOTE(review): obfuscation damage — the parameters (upstream: seed,
        # shape) share one name (SyntaxError), the f-string reads the unbound
        # `seed`, and the join should stringify `s`, not the parameter.
        return f"""gaussian_noise_s={seed}_shape={"_".join([str(__lowercase ) for s in shape] )}.npy"""
    def lowercase_ ( self ) -> int:
        # Test-teardown hook (upstream name: tearDown).
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def lowercase_ ( self , __lowercase=0 , __lowercase=(4, 4, 6_4, 6_4) , __lowercase=False ) -> Union[str, Any]:
        # Load a cached latent fixture from the Hub for (seed, shape), as
        # bfloat16 when fpaa is set, float32 otherwise.
        # NOTE(review): duplicate default-parameter names (SyntaxError) —
        # upstream signature is (seed=0, shape=(4, 4, 64, 64), fp16=False).
        lowerCAmelCase_ : Union[str, Any] = jnp.bfloataa if fpaa else jnp.floataa
        lowerCAmelCase_ : List[str] = jnp.array(load_hf_numpy(self.get_file_format(__lowercase , __lowercase ) ) , dtype=__lowercase )
        return image
    def lowercase_ ( self , __lowercase=False , __lowercase="CompVis/stable-diffusion-v1-4" ) -> Union[str, Any]:
        # Load the Flax UNet (and params) from the Hub; the "bf16" revision is
        # selected when fpaa is set.
        # NOTE(review): duplicate default-parameter names (SyntaxError) —
        # upstream signature is (fp16=False, model_id="CompVis/stable-diffusion-v1-4").
        lowerCAmelCase_ : Tuple = jnp.bfloataa if fpaa else jnp.floataa
        lowerCAmelCase_ : Optional[int] = '''bf16''' if fpaa else None
        lowerCAmelCase_ : List[str] = FlaxUNetaDConditionModel.from_pretrained(
            __lowercase , subfolder='''unet''' , dtype=__lowercase , revision=__lowercase )
        return model, params
    def lowercase_ ( self , __lowercase=0 , __lowercase=(4, 7_7, 7_6_8) , __lowercase=False ) -> Union[str, Any]:
        # Load cached text-encoder hidden states from the Hub for (seed, shape).
        # NOTE(review): duplicate default-parameter names (SyntaxError) —
        # upstream signature is (seed=0, shape=(4, 77, 768), fp16=False).
        lowerCAmelCase_ : Union[str, Any] = jnp.bfloataa if fpaa else jnp.floataa
        lowerCAmelCase_ : List[str] = jnp.array(load_hf_numpy(self.get_file_format(__lowercase , __lowercase ) ) , dtype=__lowercase )
        return hidden_states
    @parameterized.expand(
        [
            # fmt: off
            [8_3, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]],
            [1_7, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]],
            [8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]],
            [3, 1_0_0_0, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]],
            # fmt: on
        ] )
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Any:
        # Regression test: SD v1-4 UNet output slice must match the recorded
        # values for each (seed, timestep, expected_slice) case above.
        # NOTE(review): the three parameters (upstream: seed, timestep,
        # expected_slice) share one obfuscated name — a SyntaxError.
        lowerCAmelCase_ : Dict = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__lowercase )
        lowerCAmelCase_ : str = self.get_latents(__lowercase , fpaa=__lowercase )
        lowerCAmelCase_ : Optional[Any] = self.get_encoder_hidden_states(__lowercase , fpaa=__lowercase )
        lowerCAmelCase_ : List[Any] = model.apply(
            {'''params''': params} , __lowercase , jnp.array(__lowercase , dtype=jnp.intaa ) , encoder_hidden_states=__lowercase , ).sample
        assert sample.shape == latents.shape
        lowerCAmelCase_ : Optional[int] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
        lowerCAmelCase_ : str = jnp.array(__lowercase , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(__lowercase , __lowercase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]],
[1_7, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]],
[8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]],
[3, 1_0_0_0, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]],
# fmt: on
] )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Any:
lowerCAmelCase_ : Dict = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__lowercase )
lowerCAmelCase_ : Optional[Any] = self.get_latents(__lowercase , shape=(4, 4, 9_6, 9_6) , fpaa=__lowercase )
lowerCAmelCase_ : List[str] = self.get_encoder_hidden_states(__lowercase , shape=(4, 7_7, 1_0_2_4) , fpaa=__lowercase )
lowerCAmelCase_ : List[Any] = model.apply(
{'''params''': params} , __lowercase , jnp.array(__lowercase , dtype=jnp.intaa ) , encoder_hidden_states=__lowercase , ).sample
assert sample.shape == latents.shape
lowerCAmelCase_ : List[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowerCAmelCase_ : Tuple = jnp.array(__lowercase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__lowercase , __lowercase , atol=1e-2 ) | 720 |
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime (trial division up to sqrt(number))."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: return every prime from 2 up to ``n`` inclusive."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes: zero out every multiple of a surviving entry
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n: int) -> list:
    """Return all primes between 2 and ``n`` inclusive, using is_prime."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number: int) -> list:
    """Return the prime factorization of ``number`` as a list (with multiplicity).

    0 and 1 factor to themselves; primes factor to [number].
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # floor division keeps the quotient an exact int (no float drift)
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number: int) -> bool:
    """Return True if ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0
def is_odd(number: int) -> bool:
    """Return True if ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0
def goldbach(number: int) -> list:
    """Goldbach's conjecture: return two primes whose sum equals the even ``number`` > 2."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        # start at j == i so equal-prime pairs (e.g. 4 = 2 + 2, 6 = 3 + 3) are found too
        j = i
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(number1: int, number2: int) -> int:
    """Return the greatest common divisor of two non-negative ints (Euclidean algorithm)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple (kgV) of two positive ints via prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1: take each prime to its maximal multiplicity
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2: primes only present in number2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime(n: int) -> int:
    """Return the n-th prime, 0-indexed (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return every prime strictly between two primes ``p_number_1 < p_number_2``."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list:
    """Return all positive divisors of ``n`` (including 1 and n), ascending."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number(number: int) -> bool:
    """Return True if ``number`` equals the sum of its proper divisors (e.g. 6, 28)."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduce ``numerator/denominator`` by their gcd; return the reduced pair."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    """Return n! for n >= 0 (0! == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n: int) -> int:
    """Return the (n+1)-th Fibonacci number: fib(0) == 1, fib(1) == 1, fib(2) == 2, ..."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
import argparse
import json
from tqdm import tqdm
def main():
    """Convert raw DPR training data into an evaluation-set file (questions)
    and a gold-data file (tab-joined positive-context titles), one record per line.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--src_path''' , type=str , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
    parser.add_argument(
        '''--evaluation_set''' , type=str , help='''where to store parsed evaluation_set file''' , )
    parser.add_argument(
        '''--gold_data_path''' , type=str , help='''where to store parsed gold_data_path file''' , )
    args = parser.parse_args()
    with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
        args.gold_data_path , '''w''' ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['''question''']
            contexts = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
            eval_file.write(question + '''\n''' )
            gold_file.write('''\t'''.join(contexts) + '''\n''' )
# Script entry point: parse CLI args and write the evaluation/gold files.
if __name__ == "__main__":
    main()
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_UpperCAmelCase : Tuple =10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search over array[left:right] (right exclusive); return index of target or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search on a sorted list; returns an index of target or -1.

    Falls back to linear search once the active span is smaller than the
    module-level ``precision`` constant.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            # target lies in the first third
            right = one_third - 1
        elif array[two_third] < target:
            # target lies in the last third
            left = two_third + 1
        else:
            # target lies between the two probe points
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search on array[left:right]; returns an index of target or -1.

    Falls back to linear search once the span is smaller than ``precision``.
    """
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
    import doctest

    doctest.testmod()

# Interactive driver: read a sorted list and a target, search with both variants.
user_input = input("""Enter numbers separated by comma:\n""").strip()
collection = [int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
target = int(input("""Enter the number to be found in the list:\n""").strip())
result_ite = ite_ternary_search(collection, target)
result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
if result_ite != -1:
    print(f"""Iterative search: {target} found at positions: {result_ite}""")
    print(f"""Recursive search: {target} found at positions: {result_rec}""")
else:
    print("""Not found""")
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Place one queen per row via DFS; append each complete n-queens board to ``boards``.

    ``possible_board[i]`` is the column of the queen in row ``i``; the two
    collision lists track occupied 45 and 135 degree diagonals.
    """
    row = len(possible_board)
    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board])
        return
    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # Skip columns already used (vertical collision) and both diagonals:
        # 45º: row - col = b ; 135º: row + col = b
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    """Solve the n-queens puzzle and print every solution board plus a summary line."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print('''''')
    print(len(boards), '''solutions were found.''')
# Run the doctests, then print all solutions for the classic 4-queens board.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    n_queens_solution(4)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the LLaMA package: submodules are registered here
# and only imported on first attribute access via _LazyModule. Optional entries
# are added when their backend (sentencepiece / tokenizers / torch) is present.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(power: int = 1_000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16)."""
    n = 2 ** power
    r = 0
    while n:
        # peel off the last digit; tuple assignment updates both in lockstep
        r, n = r + n % 10, n // 10
    return r
# Read a power from stdin and print the digit sum of 2**power.
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
# NOTE(review): obfuscated module flag; presumably a determinism toggle for these
# tests (e.g. disabling TF32 matmuls) — confirm against the original test file.
_UpperCAmelCase : Any =False
class snake_case__( unittest.TestCase ):
    """Fast CPU tests for the VQDiffusion pipeline assembled from tiny dummy components."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        # size of the VQ codebook / scheduler vocabulary
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            '''attention_bias''': True,
            '''cross_attention_dim''': 32,
            '''attention_head_dim''': height * width,
            '''num_attention_heads''': 1,
            '''num_vector_embeds''': self.num_embed,
            '''num_embeds_ada_norm''': self.num_embeds_ada_norm,
            '''norm_num_groups''': 32,
            '''sample_size''': width,
            '''activation_fn''': '''geglu-approximate''',
        }
        model = TransformeraDModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        """Pipeline output matches the reference slice with non-learnable CF embeddings."""
        device = '''cpu'''
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = '''teddy bear playing in the pool'''
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='''np''')
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='''np''', return_dict=False, num_inference_steps=2
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        """Pipeline output matches the reference slice with learnable CF embeddings."""
        device = '''cpu'''
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = '''teddy bear playing in the pool'''
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='''np''')
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='''np''', return_dict=False, num_inference_steps=2
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
        # NOTE(review): tolerance of 2.0 matches the original; it is effectively a smoke check
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class snake_case__( unittest.TestCase ):
    """Slow GPU integration test for the pretrained microsoft/vq-diffusion-ithq pipeline."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        """Generated image matches the reference numpy fixture within tolerance."""
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''')
        pipeline = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''')
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            '''teddy bear playing in the pool''',
            num_images_per_prompt=1,
            generator=generator,
            output_type='''np''',
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the FNet package: submodules are registered here and
# only imported on first attribute access via _LazyModule. Optional entries are
# added when their backend (sentencepiece / tokenizers / torch) is present.
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # slow (sentencepiece-based) tokenizer is unavailable without the backend
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

# None: no fixed positional-embedding size limit for these checkpoints
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class snake_case__( UpperCAmelCase__ ):
    """Fast (Rust-backed) XLNet tokenizer wrapper.

    NOTE(review): this file's identifiers were mechanically mangled. The
    class attributes reference module constants that no longer exist under
    those names, several ``self.`` attribute assignments became throwaway
    locals, and the three methods below all share the name ``lowercase_`` so
    only the last definition survives on the class. The comments record the
    apparent original intent; the real names need restoring.
    """
    SCREAMING_SNAKE_CASE__ : int = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # XLNet pads on the left.
    SCREAMING_SNAKE_CASE__ : Any = """left"""
    # Slow tokenizer class used when converting from a sentencepiece vocab.
    SCREAMING_SNAKE_CASE__ : List[Any] = XLNetTokenizer
    def __init__( self , __lowercase=None , __lowercase=None , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<sep>" , __lowercase="<pad>" , __lowercase="<cls>" , __lowercase="<mask>" , __lowercase=["<eop>", "<eod>"] , **__lowercase , ) -> List[Any]:
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase_ : Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
        super().__init__(
            vocab_file=__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , **__lowercase , )
        # NOTE(review): the six assignments below bind throwaway locals; they
        # appear to have been attribute assignments originally
        # (self._pad_token_type_id = 3, self.do_lower_case, self.remove_space,
        # self.keep_accents, self.vocab_file, self.can_save_slow_tokenizer).
        lowerCAmelCase_ : List[Any] = 3
        lowerCAmelCase_ : Dict = do_lower_case
        lowerCAmelCase_ : Dict = remove_space
        lowerCAmelCase_ : List[str] = keep_accents
        lowerCAmelCase_ : List[str] = vocab_file
        lowerCAmelCase_ : str = False if not self.vocab_file else True
    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        """Apparent intent: build model inputs with special tokens in the
        XLNet layout: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        lowerCAmelCase_ : Tuple = [self.sep_token_id]
        lowerCAmelCase_ : Any = [self.cls_token_id]
        # NOTE(review): ``token_ids_a``/``sep``/``cls`` are not bound above —
        # the locals were mangled — so this raises NameError as written.
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls
    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        """Apparent intent: create token-type ids — segment 0 for the first
        sequence, segment 1 for the second, and segment id 2 for the
        trailing ``<cls>`` token."""
        lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCAmelCase_ : List[Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
        """Apparent intent: save the sentencepiece vocabulary file into the
        given directory; requires the slow vocab file to be available."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(__lowercase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase_ : str = os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        # Only copy when source and destination differ.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
            copyfile(self.vocab_file , __lowercase )
        return (out_vocab_file,)
from datetime import datetime
import requests
def lowerCAmelCase ( lowerCAmelCase_ )-> bytes:
    """Download an Instagram video/IGTV post and return its raw bytes.

    ``lowerCAmelCase_`` is the public URL of the post. The downloadgram.net
    JSON API resolves it to a direct ``src`` link, which is then fetched.

    Fix: the original bound the API prefix to a local that shadowed the
    parameter and then referenced the undefined names ``base_url``/``url``,
    raising NameError on every call.

    NOTE(review): performs two blocking network requests with no timeout.
    """
    base_url = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
    # First request: resolve the post URL to the direct video source URL.
    video_src = requests.get(base_url + lowerCAmelCase_ ).json()[0]['''urls'''][0]['''src''']
    # Second request: fetch the actual video bytes.
    return requests.get(video_src ).content
if __name__ == "__main__":
    # Simple CLI: prompt for a post URL and save the downloaded video next to
    # the script, named with the current timestamp.
    # NOTE(review): ``download_video`` / ``url`` / ``file_name`` are not
    # defined above (the module-level names were collapsed to
    # ``_UpperCAmelCase``), so this block raises NameError as written.
    _UpperCAmelCase : Union[str, Any] =input("""Enter Video/IGTV url: """).strip()
    _UpperCAmelCase : str =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
    with open(file_name, """wb""") as fp:
        fp.write(download_video(url))
    print(f"""Done. Video saved to disk as {file_name}.""")
import math
import qiskit
def lowerCAmelCase ( input_a = 1 , input_b = 1 , carry_in = 1 )-> "qiskit.result.counts.Counts":
    """Simulate a quantum full adder on ``input_a + input_b + carry_in``.

    Each input must be 0, 1 or 2; a value of 2 puts the corresponding qubit
    into superposition with a Hadamard gate. Returns the measurement counts
    of the (sum, carry-out) qubits over 1000 simulator shots.

    Fixes vs. original: the three parameters all shared one name (a
    SyntaxError), the type check called ``isinstance(x, x)``, the circuit
    objects were collapsed into a single local, and the return annotation
    evaluated ``qiskit`` at definition time (now a string annotation).

    Raises:
        TypeError: if an input is a string.
        ValueError: if an input is negative, not an exact integer, or > 2.
    """
    # Reject strings explicitly; floats are allowed if they are exact integers.
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('''inputs must be integers.''' )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''' )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers: 4 qubits (3 inputs + 1 ancilla for the carry) and
    # 2 classical bits for the (sum, carry-out) measurement.
    quantum_register = qiskit.QuantumRegister(4 , '''qr''' )
    classical_register = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(quantum_register , classical_register )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit: Toffoli/CNOT cascade leaving the sum on qubit 2 and
    # the carry-out on qubit 3.
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , classical_register )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
    # NOTE(review): ``quantum_full_adder`` is not defined in this module (the
    # function above was renamed to ``lowerCAmelCase``), so this demo raises
    # NameError as written — restore the original function name to fix it.
    print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
from itertools import product
def lowerCAmelCase ( sides_number , dice_number )-> list[int]:
    """Return the frequency of every attainable total when throwing
    ``dice_number`` dice whose faces run 1..``sides_number``.

    Index ``t`` of the returned list holds the number of *ordered* throws
    whose faces sum to ``t`` (indices below ``dice_number`` stay 0).

    Fix: the original declared both parameters with the same name (a
    SyntaxError) and collapsed every local into one name, so the body
    referenced undefined names.
    """
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    # One counter slot per total in 0..max_total.
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number , max_face_number + 1 )
    # Enumerate every ordered combination of faces and tally its sum.
    for dice_numbers in product(faces_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def lowerCAmelCase ( )-> float:
    """Project Euler 205: probability that Peter (nine 4-sided dice) rolls a
    strictly higher total than Colin (six 6-sided dice), rounded to seven
    decimal places.

    Fix: the original called the undefined name
    ``total_frequency_distribution`` (its sibling was renamed) and collapsed
    every local into one name; the frequency helper is inlined here so this
    function is self-contained.
    """
    def _total_frequency_distribution(sides_number , dice_number ):
        # Frequency of each total over all ordered throws of the given dice.
        totals_frequencies = [0] * (sides_number * dice_number + 1)
        for dice_numbers in product(range(1 , sides_number + 1 ) , repeat=dice_number ):
            totals_frequencies[sum(dice_numbers )] += 1
        return totals_frequencies
    peter_totals_frequencies = _total_frequency_distribution(sides_number=4 , dice_number=9 )
    colin_totals_frequencies = _total_frequency_distribution(sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    # For each total Peter can roll, count the Colin outcomes strictly below it.
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    return round(peter_win_probability , ndigits=7 )
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this module (the function
    # above was renamed to ``lowerCAmelCase``), so this raises NameError as
    # written.
    print(f"""{solution() = }""")
import re
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
    """Return True if ``lowerCAmelCase_`` is a valid Indian mobile number.

    Accepts an optional ``+91`` prefix (with optional ``-`` or space), an
    optional leading ``0`` or ``91``, followed by ten digits starting with
    7, 8 or 9.

    Fix: the original rebound the compiled pattern over the parameter, then
    searched the pattern inside itself and compared against the undefined
    name ``phone``.
    """
    pattern = re.compile(r'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
    # The pattern is fully anchored, so a successful search means the whole
    # input matched; the explicit comparison mirrors the original intent.
    if match := re.search(pattern , lowerCAmelCase_ ):
        return match.string == lowerCAmelCase_
    return False
if __name__ == "__main__":
    # NOTE(review): ``indian_phone_validator`` is not defined here (the
    # function above was renamed to ``lowerCAmelCase``); this demo raises
    # NameError as written.
    print(indian_phone_validator("""+918827897895"""))
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
    """Return True if ``lowerCAmelCase_`` is a valid dotted-quad IPv4 string.

    Non-numeric components are dropped before the length check, so any
    non-digit octet makes the address invalid.

    Fix: the original referenced the undefined name ``ip_va_address`` and
    converted the whole input string instead of each component.

    NOTE(review): the upper bound 254 is kept from the original constant —
    255 is normally a valid octet value; confirm the intended range.
    """
    octets = [int(i ) for i in lowerCAmelCase_.split('''.''' ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 254 for octet in octets )
if __name__ == "__main__":
    # NOTE(review): ``is_ip_va_address_valid`` / ``ip`` / ``valid_or_invalid``
    # are not defined above (the module names were mangled), so this block
    # raises NameError as written.
    _UpperCAmelCase : List[Any] =input().strip()
    _UpperCAmelCase : str ="""valid""" if is_ip_va_address_valid(ip) else """invalid"""
    print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# PIL is optional; it is only referenced in the ``preprocess`` return
# annotation below.
if is_vision_available():
    import PIL
# Module logger.
_UpperCAmelCase : Any =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
    """ConvNeXt-style image processor: crop-pct aware resize, rescale and
    per-channel normalization.

    NOTE(review): identifiers in this file were mechanically mangled — the
    ``__init__`` below binds throwaway locals named ``lowerCAmelCase_``
    instead of setting the ``self.do_resize``/``self.size``/... attributes
    that ``preprocess`` later reads, and the four methods all share the name
    ``lowercase_`` (so ``self.resize``/``self.rescale``/``self.normalize``
    called inside ``preprocess`` do not exist). The original names need
    restoring before this class can work.
    """
    SCREAMING_SNAKE_CASE__ : Dict = ["""pixel_values"""]
    def __init__( self , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 2_5_5 , __lowercase = True , __lowercase = None , __lowercase = None , **__lowercase , ) -> None:
        super().__init__(**__lowercase )
        # Apparent intent: store the processing defaults on ``self``
        # (shortest edge 384, crop_pct 224/256, ImageNet mean/std fallbacks).
        lowerCAmelCase_ : Dict = size if size is not None else {'''shortest_edge''': 3_8_4}
        lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
        lowerCAmelCase_ : List[Any] = do_resize
        lowerCAmelCase_ : Optional[int] = size
        # Default value set here for backwards compatibility where the value in config is None
        lowerCAmelCase_ : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
        lowerCAmelCase_ : Tuple = resample
        lowerCAmelCase_ : Optional[int] = do_rescale
        lowerCAmelCase_ : Any = rescale_factor
        lowerCAmelCase_ : List[str] = do_normalize
        lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        lowerCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ) -> np.ndarray:
        """Resize an image. Below a 384-px shortest edge the image is resized
        to ``shortest_edge / crop_pct`` then center-cropped to a square;
        at 384 or larger it is warped directly to a square."""
        lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
        if "shortest_edge" not in size:
            raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
        lowerCAmelCase_ : Optional[int] = size['''shortest_edge''']
        if shortest_edge < 3_8_4:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            lowerCAmelCase_ : Optional[Any] = int(shortest_edge / crop_pct )
            lowerCAmelCase_ : List[str] = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase )
            lowerCAmelCase_ : List[Any] = resize(image=__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=__lowercase , size=(shortest_edge, shortest_edge) , data_format=__lowercase , **__lowercase )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                __lowercase , size=(shortest_edge, shortest_edge) , resample=__lowercase , data_format=__lowercase , **__lowercase )
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> Any:
        """Rescale pixel values by ``scale`` (typically 1/255)."""
        return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
    def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image:
        """Run the configured resize → rescale → normalize pipeline over a
        batch of images and return a ``BatchFeature`` of pixel values."""
        # Per-call overrides fall back to the instance-level defaults.
        lowerCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
        lowerCAmelCase_ : Any = crop_pct if crop_pct is not None else self.crop_pct
        lowerCAmelCase_ : str = resample if resample is not None else self.resample
        lowerCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
        lowerCAmelCase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
        lowerCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean
        lowerCAmelCase_ : int = image_std if image_std is not None else self.image_std
        lowerCAmelCase_ : int = size if size is not None else self.size
        lowerCAmelCase_ : List[str] = get_size_dict(__lowercase , default_to_square=__lowercase )
        lowerCAmelCase_ : Tuple = make_list_of_images(__lowercase )
        if not valid_images(__lowercase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # NOTE(review): this condition parses as
        # ``(do_resize and size is None) or resample is None`` — the intended
        # check is presumably ``do_resize and (size is None or resample is
        # None)``; as written it can raise even when do_resize is False.
        # TODO: add parentheses.
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
            raise ValueError('''crop_pct must be specified if size < 384.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        lowerCAmelCase_ : Optional[Any] = [to_numpy_array(__lowercase ) for image in images]
        if do_resize:
            lowerCAmelCase_ : Union[str, Any] = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images]
        if do_rescale:
            lowerCAmelCase_ : Any = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
        if do_normalize:
            lowerCAmelCase_ : List[Any] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
        # Convert to the requested channel layout (channels-first by default).
        lowerCAmelCase_ : Optional[Any] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
        lowerCAmelCase_ : Dict = {'''pixel_values''': images}
        return BatchFeature(data=__lowercase , tensor_type=__lowercase )
from __future__ import annotations
import math
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCAmelCase ( lowerCAmelCase_ )-> list[int]:
lowerCAmelCase_ : Union[str, Any] = str(lowerCAmelCase_ )
lowerCAmelCase_ : Union[str, Any] = [n]
for i in range(1 , len(lowerCAmelCase_ ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
if len(str(lowerCAmelCase_ ) ) > 3:
if not is_prime(int(str(lowerCAmelCase_ )[-3:] ) ) or not is_prime(int(str(lowerCAmelCase_ )[:3] ) ):
return False
return True
def lowerCAmelCase ( lowerCAmelCase_ = 11 )-> list[int]:
lowerCAmelCase_ : list[int] = []
lowerCAmelCase_ : Optional[int] = 13
while len(lowerCAmelCase_ ) != count:
if validate(lowerCAmelCase_ ):
lowerCAmelCase_ : List[Any] = list_truncated_nums(lowerCAmelCase_ )
if all(is_prime(lowerCAmelCase_ ) for i in list_nums ):
list_truncated_primes.append(lowerCAmelCase_ )
num += 2
return list_truncated_primes
def lowerCAmelCase ( )-> int:
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(f"""{sum(compute_truncated_primes(11)) = }""") | 706 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
_UpperCAmelCase : Optional[int] =logging.get_logger(__name__)
# Map of pretrained checkpoint name -> config.json URL on the Hub.
_UpperCAmelCase : Union[str, Any] ={
    """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
    """Configuration for a GPT-NeoX-Japanese model.

    Holds the architecture hyper-parameters; the defaults reproduce the
    ``abeja/gpt-neox-japanese-2.7b`` checkpoint.
    """
    # Model-type key used by the transformers auto classes.
    SCREAMING_SNAKE_CASE__ : str = """gpt_neox_japanese"""
    def __init__( self , vocab_size=3_2_0_0_0 , hidden_size=2_5_6_0 , num_hidden_layers=3_2 , num_attention_heads=3_2 , intermediate_multiple_size=4 , hidden_act="gelu" , rotary_pct=1.00 , rotary_emb_base=1_0_0_0_0 , max_position_embeddings=2_0_4_8 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=3_1_9_9_6 , eos_token_id=3_1_9_9_9 , attention_dropout=0.1 , hidden_dropout=0.0 , **kwargs , ) -> None:
        """Create the config and store every hyper-parameter on ``self``.

        Fix: the original declared every parameter as ``__lowercase``
        (duplicate parameter names are a SyntaxError) and bound the values
        to throwaway locals, so no attribute was ever set on the config.
        Parameter names/order restored from the assignment targets and the
        upstream GPTNeoXJapaneseConfig defaults.
        """
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # Feed-forward width is expressed as a multiple of hidden_size.
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        # Fraction of head dims receiving rotary embeddings, and the RoPE base.
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class snake_case__:
    """Generates random LayoutLM configurations and inputs for the TF tests.

    NOTE(review): identifiers in this file were mechanically mangled — the
    constructor repeats the parameter name ``__lowercase`` (a SyntaxError),
    every ``self.*`` attribute assignment became a throwaway local named
    ``lowerCAmelCase_``, and all seven methods below share the name
    ``lowercase_`` so only the last definition survives on the class. The
    comments record the apparent original intent; the real names need
    restoring.
    """
    def __init__( self , __lowercase , __lowercase=1_3 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=9_9 , __lowercase=3_2 , __lowercase=2 , __lowercase=4 , __lowercase=3_7 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_1_2 , __lowercase=1_6 , __lowercase=2 , __lowercase=0.02 , __lowercase=3 , __lowercase=4 , __lowercase=None , __lowercase=1_0_0_0 , ) -> str:
        # Apparent intent: store the test hyper-parameters on ``self``
        # (parent test case, batch size, sequence length, model dims, ...).
        lowerCAmelCase_ : Dict = parent
        lowerCAmelCase_ : Union[str, Any] = batch_size
        lowerCAmelCase_ : int = seq_length
        lowerCAmelCase_ : Dict = is_training
        lowerCAmelCase_ : List[str] = use_input_mask
        lowerCAmelCase_ : Union[str, Any] = use_token_type_ids
        lowerCAmelCase_ : List[Any] = use_labels
        lowerCAmelCase_ : Dict = vocab_size
        lowerCAmelCase_ : Optional[Any] = hidden_size
        lowerCAmelCase_ : str = num_hidden_layers
        lowerCAmelCase_ : Dict = num_attention_heads
        lowerCAmelCase_ : str = intermediate_size
        lowerCAmelCase_ : Union[str, Any] = hidden_act
        lowerCAmelCase_ : str = hidden_dropout_prob
        lowerCAmelCase_ : List[Any] = attention_probs_dropout_prob
        lowerCAmelCase_ : Any = max_position_embeddings
        lowerCAmelCase_ : List[str] = type_vocab_size
        lowerCAmelCase_ : List[str] = type_sequence_label_size
        lowerCAmelCase_ : int = initializer_range
        lowerCAmelCase_ : Dict = num_labels
        lowerCAmelCase_ : str = num_choices
        lowerCAmelCase_ : int = scope
        lowerCAmelCase_ : int = range_bbox
    def lowercase_ ( self ) -> Union[str, Any]:
        """Apparent intent: build a random config plus ids/bbox/mask/label
        tensors for one forward pass."""
        lowerCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # convert bbox to numpy since TF does not support item assignment
        lowerCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                # Swap coordinates so that x0 <= x1 and y0 <= y1 for each box.
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    lowerCAmelCase_ : Optional[int] = bbox[i, j, 3]
                    lowerCAmelCase_ : str = bbox[i, j, 1]
                    lowerCAmelCase_ : Any = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    lowerCAmelCase_ : List[Any] = bbox[i, j, 2]
                    lowerCAmelCase_ : Any = bbox[i, j, 0]
                    lowerCAmelCase_ : Dict = t
        lowerCAmelCase_ : Any = tf.convert_to_tensor(__lowercase )
        lowerCAmelCase_ : Union[str, Any] = None
        if self.use_input_mask:
            lowerCAmelCase_ : str = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCAmelCase_ : Any = None
        if self.use_token_type_ids:
            lowerCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCAmelCase_ : str = None
        lowerCAmelCase_ : Dict = None
        lowerCAmelCase_ : Union[str, Any] = None
        if self.use_labels:
            lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
        lowerCAmelCase_ : Optional[int] = LayoutLMConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[int]:
        """Apparent intent: check the base model's output shapes."""
        lowerCAmelCase_ : str = TFLayoutLMModel(config=__lowercase )
        lowerCAmelCase_ : str = model(__lowercase , __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
        lowerCAmelCase_ : List[Any] = model(__lowercase , __lowercase , token_type_ids=__lowercase )
        lowerCAmelCase_ : Any = model(__lowercase , __lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> str:
        """Apparent intent: check the masked-LM head's logits shape."""
        lowerCAmelCase_ : Dict = TFLayoutLMForMaskedLM(config=__lowercase )
        lowerCAmelCase_ : int = model(__lowercase , __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]:
        """Apparent intent: check the sequence-classification head's logits."""
        lowerCAmelCase_ : int = self.num_labels
        lowerCAmelCase_ : Dict = TFLayoutLMForSequenceClassification(config=__lowercase )
        lowerCAmelCase_ : Union[str, Any] = model(__lowercase , __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]:
        """Apparent intent: check the token-classification head's logits."""
        lowerCAmelCase_ : List[str] = self.num_labels
        lowerCAmelCase_ : Union[str, Any] = TFLayoutLMForTokenClassification(config=__lowercase )
        lowerCAmelCase_ : int = model(__lowercase , __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> str:
        """Apparent intent: check the question-answering head's start/end logits."""
        lowerCAmelCase_ : Tuple = TFLayoutLMForQuestionAnswering(config=__lowercase )
        lowerCAmelCase_ : Dict = model(__lowercase , __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def lowercase_ ( self ) -> List[Any]:
        """Apparent intent: split prepare_config_and_inputs() output into
        (config, inputs_dict) for the common test mixin."""
        lowerCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
        # NOTE(review): this looks like a mangled tuple unpacking of
        # (config, input_ids, bbox, token_type_ids, input_mask, ...).
        (
            lowerCAmelCase_
        ) : Any = config_and_inputs
        lowerCAmelCase_ : List[Any] = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_tf
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """TF LayoutLM model test suite (common model tests + pipeline tests).

    NOTE(review): the mixin bases were both mangled to ``UpperCAmelCase__``
    (undefined here), every method shares the mangled name ``lowercase_`` so
    only the last definition survives, and the tester/config-tester locals
    in the first method were meant to be ``self.model_tester`` /
    ``self.config_tester`` attribute assignments.
    """
    SCREAMING_SNAKE_CASE__ : Dict = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    SCREAMING_SNAKE_CASE__ : List[str] = (
        {
            """feature-extraction""": TFLayoutLMModel,
            """fill-mask""": TFLayoutLMForMaskedLM,
            """text-classification""": TFLayoutLMForSequenceClassification,
            """token-classification""": TFLayoutLMForTokenClassification,
            """zero-shot""": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : Any = True
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = 10
    def lowercase_ ( self ) -> Union[str, Any]:
        # Apparent intent: set up the shared model tester and config tester.
        lowerCAmelCase_ : Union[str, Any] = TFLayoutLMModelTester(self )
        lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , hidden_size=3_7 )
    def lowercase_ ( self ) -> int:
        # Run the generic config serialization/round-trip checks.
        self.config_tester.run_common_tests()
    def lowercase_ ( self ) -> Tuple:
        lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowercase )
    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__lowercase )
    def lowercase_ ( self ) -> Optional[Any]:
        lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__lowercase )
    def lowercase_ ( self ) -> int:
        lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__lowercase )
    def lowercase_ ( self ) -> Optional[Any]:
        lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__lowercase )
    @slow
    def lowercase_ ( self ) -> Dict:
        # Smoke-test loading the first pretrained checkpoint from the Hub.
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase_ : str = TFLayoutLMModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )
    @unittest.skip('''Onnx compliancy broke with TF 2.10''' )
    def lowercase_ ( self ) -> Union[str, Any]:
        pass
def lowerCAmelCase ( )-> List[Any]:
    """Prepare a fixed batch of two LayoutLM sequences (ids, attention mask,
    bounding boxes, token-type ids, token-level labels) for the integration
    tests below.

    NOTE(review): the locals were all mangled to ``lowerCAmelCase_``, so the
    final ``return input_ids, attention_mask, ...`` references undefined
    names; the original local names need restoring.
    """
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    lowerCAmelCase_ : Union[str, Any] = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231
    lowerCAmelCase_ : Dict = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
    lowerCAmelCase_ : Union[str, Any] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231
    lowerCAmelCase_ : List[Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
    # these are sequence labels (i.e. at the token level)
    lowerCAmelCase_ : List[str] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class snake_case__( unittest.TestCase ):
    """Slow integration tests: run real LayoutLM checkpoints on the fixed
    batch from the prepare function above and compare outputs against
    reference values/shapes.

    NOTE(review): ``prepare_layoutlm_batch_inputs`` is not defined under
    that name in this mangled file (the function above is named
    ``lowerCAmelCase``), the tuple results are bound to single throwaway
    locals, and all four test methods share the mangled name ``lowercase_``
    so only the last survives. The original names need restoring.
    """
    @slow
    def lowercase_ ( self ) -> int:
        # Base model: compare sequence-output and pooled-output slices
        # against stored reference values.
        lowerCAmelCase_ : int = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' )
        lowerCAmelCase_ : Dict = prepare_layoutlm_batch_inputs()
        # forward pass
        lowerCAmelCase_ : str = model(input_ids=__lowercase , bbox=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
        # test the sequence output on [0, :3, :3]
        lowerCAmelCase_ : List[str] = tf.convert_to_tensor(
            [[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __lowercase , atol=1e-3 ) )
        # test the pooled output on [1, :3]
        lowerCAmelCase_ : Any = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] )
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , __lowercase , atol=1e-3 ) )
    @slow
    def lowercase_ ( self ) -> Union[str, Any]:
        # initialize model with randomly initialized sequence classification head
        lowerCAmelCase_ : str = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2 )
        lowerCAmelCase_ : str = prepare_layoutlm_batch_inputs()
        # forward pass
        lowerCAmelCase_ : Optional[int] = model(
            input_ids=__lowercase , bbox=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=tf.convert_to_tensor([1, 1] ) , )
        # test whether we get a loss as a scalar
        lowerCAmelCase_ : List[str] = outputs.loss
        lowerCAmelCase_ : Optional[int] = (2,)
        self.assertEqual(loss.shape , __lowercase )
        # test the shape of the logits
        lowerCAmelCase_ : List[str] = outputs.logits
        lowerCAmelCase_ : Union[str, Any] = (2, 2)
        self.assertEqual(logits.shape , __lowercase )
    @slow
    def lowercase_ ( self ) -> List[Any]:
        # initialize model with randomly initialized token classification head
        lowerCAmelCase_ : int = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=1_3 )
        lowerCAmelCase_ : Optional[Any] = prepare_layoutlm_batch_inputs()
        # forward pass
        lowerCAmelCase_ : Optional[Any] = model(
            input_ids=__lowercase , bbox=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
        # test the shape of the logits
        lowerCAmelCase_ : Tuple = outputs.logits
        lowerCAmelCase_ : List[Any] = tf.convert_to_tensor((2, 2_5, 1_3) )
        self.assertEqual(logits.shape , __lowercase )
    @slow
    def lowercase_ ( self ) -> Tuple:
        # initialize model with randomly initialized token classification head
        lowerCAmelCase_ : Dict = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' )
        lowerCAmelCase_ : List[str] = prepare_layoutlm_batch_inputs()
        # forward pass
        lowerCAmelCase_ : Any = model(input_ids=__lowercase , bbox=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
        # test the shape of the logits
        lowerCAmelCase_ : Optional[int] = tf.convert_to_tensor((2, 2_5) )
        self.assertEqual(outputs.start_logits.shape , __lowercase )
        self.assertEqual(outputs.end_logits.shape , __lowercase )
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class snake_case__:
    """Helper that builds a tiny MaskFormer config plus random inputs for the tests.

    NOTE(review): throughout this class (and file) locals are bound to
    ``lowerCAmelCase_`` yet later read under their original names (``config``,
    ``pixel_values``, ``model`` ...) — the tuple unpacking / naming appears to
    have been mangled by a transformation; confirm against the upstream test.
    """
    def __init__( self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=1_0 , __lowercase=3 , __lowercase=3_2 * 4 , __lowercase=3_2 * 6 , __lowercase=4 , __lowercase=3_2 , ) -> Union[str, Any]:
        """Store the hyper-parameters used to build the tiny test model."""
        lowerCAmelCase_ : str = parent
        lowerCAmelCase_ : Optional[Any] = batch_size
        lowerCAmelCase_ : List[Any] = is_training
        lowerCAmelCase_ : Optional[Any] = use_auxiliary_loss
        lowerCAmelCase_ : List[Any] = num_queries
        lowerCAmelCase_ : str = num_channels
        lowerCAmelCase_ : Dict = min_size
        lowerCAmelCase_ : List[str] = max_size
        lowerCAmelCase_ : Any = num_labels
        lowerCAmelCase_ : str = mask_feature_size
    def lowercase_ ( self ) -> List[Any]:
        """Build a config and random pixel values, pixel mask, and binary mask/class labels."""
        lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __lowercase )
        lowerCAmelCase_ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase )
        # random binary masks, one per label
        lowerCAmelCase_ : Union[str, Any] = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5
        ).float()
        lowerCAmelCase_ : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long()
        lowerCAmelCase_ : Dict = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def lowercase_ ( self ) -> List[str]:
        """Return a minimal MaskFormer config (tiny Swin backbone + tiny DETR decoder)."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
    def lowercase_ ( self ) -> Union[str, Any]:
        """Return (config, inputs_dict) as expected by the common-test mixin."""
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = self.prepare_config_and_inputs()
        lowerCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict
    def lowercase_ ( self , __lowercase , __lowercase ) -> Any:
        """Assert that every hidden-state tuple in `output` has the expected length."""
        lowerCAmelCase_ : Optional[int] = output.encoder_hidden_states
        lowerCAmelCase_ : List[Any] = output.pixel_decoder_hidden_states
        lowerCAmelCase_ : Optional[Any] = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowercase ) , config.decoder_config.decoder_layers )
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> int:
        """Run the bare MaskFormerModel and check last-hidden-state shapes."""
        with torch.no_grad():
            lowerCAmelCase_ : List[Any] = MaskFormerModel(config=__lowercase )
            model.to(__lowercase )
            model.eval()
            lowerCAmelCase_ : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase )
            lowerCAmelCase_ : Optional[int] = model(__lowercase , output_hidden_states=__lowercase )
            # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
            # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(__lowercase , __lowercase )
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
        """Run the segmentation head with and without labels and check logits/loss shapes."""
        lowerCAmelCase_ : Any = MaskFormerForInstanceSegmentation(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        def comm_check_on_output(__lowercase ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            lowerCAmelCase_ : int = model(pixel_values=__lowercase , pixel_mask=__lowercase )
            lowerCAmelCase_ : Any = model(__lowercase )
        comm_check_on_output(__lowercase )
        lowerCAmelCase_ : List[Any] = model(
            pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        comm_check_on_output(__lowercase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """MaskFormer model tests driven by the common ModelTesterMixin machinery.

    NOTE(review): locals are bound to ``lowerCAmelCase_`` yet read under other
    names (``model``, ``outputs``, ``arg_names`` ...) — naming/unpacking appears
    mangled; confirm against the upstream test file before relying on this.
    """
    # model classes exercised by the common tests
    SCREAMING_SNAKE_CASE__ : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    # pipeline-task -> model-class mapping used by the pipeline test mixin
    SCREAMING_SNAKE_CASE__ : Tuple = (
        {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    # feature flags consumed by the mixins — original attribute names were lost
    # in mangling (presumably test_pruning / head-masking style switches)
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : Dict = False
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : List[str] = False
    def lowercase_ ( self ) -> List[Any]:
        """Create the model tester and config tester used by all tests below."""
        lowerCAmelCase_ : Any = MaskFormerModelTester(self )
        lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
    def lowercase_ ( self ) -> Any:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def lowercase_ ( self ) -> List[str]:
        """Check the bare model with hidden-state outputs enabled."""
        lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )
    def lowercase_ ( self ) -> Optional[Any]:
        """Check the instance-segmentation head model."""
        lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase )
    @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
    def lowercase_ ( self ) -> str:
        pass
    @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass
    @unittest.skip(reason='''MaskFormer is not a generative model''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass
    @unittest.skip(reason='''MaskFormer does not use token embeddings''' )
    def lowercase_ ( self ) -> Union[str, Any]:
        pass
    @require_torch_multi_gpu
    @unittest.skip(
        reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowercase_ ( self ) -> Dict:
        pass
    def lowercase_ ( self ) -> List[str]:
        """Assert that every model class's forward signature starts with `pixel_values`."""
        lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : Tuple = model_class(__lowercase )
            lowerCAmelCase_ : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase_ : str = [*signature.parameters.keys()]
            lowerCAmelCase_ : Tuple = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowercase )
    @slow
    def lowercase_ ( self ) -> Optional[int]:
        """Smoke-test loading a pretrained checkpoint from the hub."""
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            lowerCAmelCase_ : str = MaskFormerModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )
    def lowercase_ ( self ) -> List[Any]:
        """Check that a loss is produced when labels are passed."""
        lowerCAmelCase_ : Tuple = (self.model_tester.min_size,) * 2
        lowerCAmelCase_ : List[Any] = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=__lowercase ),
            '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowercase ),
            '''class_labels''': torch.zeros(2 , 1_0 , device=__lowercase ).long(),
        }
        lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase )
        lowerCAmelCase_ : Dict = model(**__lowercase )
        self.assertTrue(outputs.loss is not None )
    def lowercase_ ( self ) -> Dict:
        """Re-check the bare model with hidden-state outputs enabled."""
        lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )
    def lowercase_ ( self ) -> int:
        """Check that attention maps are returned when requested."""
        lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : List[str] = model_class(__lowercase ).to(__lowercase )
            lowerCAmelCase_ : int = model(**__lowercase , output_attentions=__lowercase )
            self.assertTrue(outputs.attentions is not None )
    def lowercase_ ( self ) -> List[str]:
        """Train one step and backprop the loss (segmentation head only)."""
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase_ : int = self.all_model_classes[1]
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ : Optional[Any] = model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        lowerCAmelCase_ : Optional[Any] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss
        loss.backward()
    def lowercase_ ( self ) -> Optional[int]:
        """Backprop with retained graph and assert gradients reach every retained activation."""
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase_ : Any = self.all_model_classes[1]
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ : Tuple = True
        lowerCAmelCase_ : Tuple = True
        lowerCAmelCase_ : Any = model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        lowerCAmelCase_ : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        lowerCAmelCase_ : str = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        lowerCAmelCase_ : Union[str, Any] = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=__lowercase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
_UpperCAmelCase : float =1E-4  # tolerance, presumably passed as `atol` to the allclose checks below — confirm
def lowerCAmelCase ( )-> Any:
    """Load and return the standard COCO test image used by the integration tests.

    Fix: the opened image was bound to a throwaway name while the unbound name
    ``image`` was returned, raising NameError at call time.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_vision
@slow
class snake_case__( unittest.TestCase ):
    """Slow MaskFormer integration tests against pretrained hub checkpoints.

    NOTE(review): as elsewhere in this file, locals are bound to
    ``lowerCAmelCase_`` but read under other names (``model``, ``inputs`` ...) —
    naming/unpacking appears mangled; confirm against the upstream test file.
    """
    @cached_property
    def lowercase_ ( self ) -> Union[str, Any]:
        """Return the pretrained image processor (or None when vision deps are absent)."""
        return (
            MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            if is_vision_available()
            else None
        )
    def lowercase_ ( self ) -> Any:
        """Bare model inference: compare hidden-state slices against recorded values."""
        lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase )
        lowerCAmelCase_ : Dict = self.default_image_processor
        lowerCAmelCase_ : int = prepare_img()
        lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : List[str] = model(**__lowercase )
        # expected top-left 3x3 slice of the encoder's last hidden state
        lowerCAmelCase_ : Union[str, Any] = torch.tensor(
            [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        lowerCAmelCase_ : List[Any] = torch.tensor(
            [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        lowerCAmelCase_ : int = torch.tensor(
            [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) )
    def lowercase_ ( self ) -> Dict:
        """Segmentation-head inference (Swin backbone): check logits slices and shapes."""
        lowerCAmelCase_ : Optional[Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : Tuple = self.default_image_processor
        lowerCAmelCase_ : Optional[Any] = prepare_img()
        lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : Dict = model(**__lowercase )
        # masks_queries_logits
        lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCAmelCase_ : Tuple = [
            [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
            [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
            [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
        ]
        lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        # class_queries_logits
        lowerCAmelCase_ : List[Any] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ : Dict = torch.tensor(
            [
                [1.6_512e00, -5.2_572e00, -3.3_519e00],
                [3.6_169e-02, -5.9_025e00, -2.9_313e00],
                [1.0_766e-04, -7.7_630e00, -5.1_263e00],
            ] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
    def lowercase_ ( self ) -> Optional[Any]:
        """Segmentation-head inference (ResNet-101 backbone): check logits slices and shapes."""
        lowerCAmelCase_ : str = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : int = self.default_image_processor
        lowerCAmelCase_ : Optional[Any] = prepare_img()
        lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : str = model(**__lowercase )
        # masks_queries_logits
        lowerCAmelCase_ : List[str] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
        lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        # class_queries_logits
        lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ : int = torch.tensor(
            [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
    def lowercase_ ( self ) -> Optional[Any]:
        """Check that passing segmentation maps through the processor yields a loss."""
        lowerCAmelCase_ : Dict = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : str = self.default_image_processor
        lowerCAmelCase_ : Union[str, Any] = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
        lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase )
        lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']]
        lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']]
        with torch.no_grad():
            lowerCAmelCase_ : str = model(**__lowercase )
        self.assertTrue(outputs.loss is not None )
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class snake_case__( unittest.TestCase ):
    """Tests for FlaxAutoModel: hub loading, jit-compatibility, and error messages.

    NOTE(review): several locals are bound to ``lowerCAmelCase_`` but read as
    ``config``/``model``/``tokenizer`` below — naming appears mangled; confirm
    against the upstream test file.
    """
    @slow
    def lowercase_ ( self ) -> List[str]:
        """Load BERT checkpoints through the auto classes and check the types."""
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(__lowercase ):
                lowerCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained(__lowercase )
                self.assertIsNotNone(__lowercase )
                self.assertIsInstance(__lowercase , __lowercase )
                lowerCAmelCase_ : Any = FlaxAutoModel.from_pretrained(__lowercase )
                self.assertIsNotNone(__lowercase )
                self.assertIsInstance(__lowercase , __lowercase )
    @slow
    def lowercase_ ( self ) -> List[str]:
        """Load RoBERTa checkpoints through the auto classes and check the types."""
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(__lowercase ):
                lowerCAmelCase_ : Tuple = AutoConfig.from_pretrained(__lowercase )
                self.assertIsNotNone(__lowercase )
                self.assertIsInstance(__lowercase , __lowercase )
                lowerCAmelCase_ : Union[str, Any] = FlaxAutoModel.from_pretrained(__lowercase )
                self.assertIsNotNone(__lowercase )
                self.assertIsInstance(__lowercase , __lowercase )
    @slow
    def lowercase_ ( self ) -> List[str]:
        """Check that a Flax BERT forward pass can be jax.jit-compiled."""
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(__lowercase )
            lowerCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(__lowercase )
            lowerCAmelCase_ : Optional[int] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
            @jax.jit
            def eval(**__lowercase ):
                return model(**__lowercase )
            eval(**__lowercase ).block_until_ready()
    @slow
    def lowercase_ ( self ) -> List[Any]:
        """Check that a Flax RoBERTa forward pass can be jax.jit-compiled."""
        for model_name in ["roberta-base", "roberta-large"]:
            lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(__lowercase )
            lowerCAmelCase_ : List[str] = FlaxRobertaModel.from_pretrained(__lowercase )
            lowerCAmelCase_ : List[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
            @jax.jit
            def eval(**__lowercase ):
                return model(**__lowercase )
            eval(**__lowercase ).block_until_ready()
    def lowercase_ ( self ) -> Union[str, Any]:
        """Invalid model identifier must raise with a helpful message."""
        with self.assertRaisesRegex(
            __lowercase , '''bert-base is not a local folder and is not a valid model identifier''' ):
            lowerCAmelCase_ : Optional[int] = FlaxAutoModel.from_pretrained('''bert-base''' )
    def lowercase_ ( self ) -> List[str]:
        """Invalid revision must raise with a helpful message."""
        with self.assertRaisesRegex(
            __lowercase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            lowerCAmelCase_ : Any = FlaxAutoModel.from_pretrained(__lowercase , revision='''aaaaaa''' )
    def lowercase_ ( self ) -> Tuple:
        """Repo without a Flax weights file must raise with a helpful message."""
        with self.assertRaisesRegex(
            __lowercase , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ):
            lowerCAmelCase_ : List[str] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
    def lowercase_ ( self ) -> Tuple:
        """PyTorch-only repo must suggest `from_pt=True`."""
        with self.assertRaisesRegex(__lowercase , '''Use `from_pt=True` to load this model''' ):
            lowerCAmelCase_ : Tuple = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
| 708 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase__ )
class snake_case__( UpperCAmelCase__ ):
    """Task template describing an automatic-speech-recognition dataset:
    an audio input column and a string transcription column.
    """
    # task name; included in asdict() output even when left at its default
    SCREAMING_SNAKE_CASE__ : str = field(default="""automatic-speech-recognition""", metadata={"""include_in_asdict_even_if_is_default""": True} )
    # expected input/label feature schemas
    SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""audio""": Audio()} )
    SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""transcription""": Value("""string""" )} )
    # default column names looked up in the dataset
    SCREAMING_SNAKE_CASE__ : str = "audio"
    SCREAMING_SNAKE_CASE__ : str = "transcription"
    def lowercase_ ( self ) -> int:
        """Validate the dataset features and return a copy of this template
        whose input schema carries the dataset's actual Audio feature.

        NOTE(review): the locals below are bound to ``lowerCAmelCase_`` but
        read as ``task_template``/``input_schema`` — the assignments appear
        mangled; confirm against the upstream implementation.
        """
        if self.audio_column not in features:
            raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
        if not isinstance(features[self.audio_column] , __lowercase ):
            raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
        lowerCAmelCase_ : List[str] = copy.deepcopy(self )
        lowerCAmelCase_ : Optional[Any] = self.input_schema.copy()
        lowerCAmelCase_ : Optional[Any] = features[self.audio_column]
        lowerCAmelCase_ : List[str] = input_schema
        return task_template
    @property
    def lowercase_ ( self ) -> Dict[str, str]:
        """Map dataset column names to the template's canonical column names."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Return True if the 0-indexed digit tuple `num` has Project Euler 43's
    substring-divisibility property: d2d3d4 % 2 == 0, d3d4d5 % 3 == 0,
    d4d5d6 % 5 == 0, and the remaining 3-digit windows are divisible by
    7, 11, 13, 17 respectively.

    Fix: this function was previously also named `lowerCAmelCase`, which the
    second definition below shadowed while calling the (then-undefined) name
    `is_substring_divisible` — restoring this name makes the call resolve.
    """
    # d2d3d4 divisible by 2 <=> its last digit num[3] is even
    if num[3] % 2 != 0:
        return False
    # divisibility by 3 <=> digit sum divisible by 3
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d4d5d6 divisible by 5 <=> last digit num[5] is 0 or 5
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        # 3-digit window num[i+4..i+6] must be divisible by the matching prime
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def lowerCAmelCase ( lowerCAmelCase_ = 10 )-> int:
    """Sum all pandigital numbers over digits 0..lowerCAmelCase_-1 that have
    the substring-divisibility property (Project Euler problem 43).

    Fix: `map(lowerCAmelCase_, lowerCAmelCase_)` mapped the integer argument
    over itself; the digits must be mapped through `str` before joining.
    """
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(lowerCAmelCase_))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
    # Fix: previously called the undefined name `solution()`.
    print(f"""{lowerCAmelCase() = }""")
# NOTE(review): the constant names below were mangled to `_UpperCAmelCase`;
# judging by the contents, each frozenset enumerates either the accepted call
# parameters or the required arguments of a family of diffusion pipelines —
# confirm the original names (e.g. TEXT_TO_IMAGE_PARAMS) against upstream.
# presumably: text-guided image generation — full parameter set
_UpperCAmelCase : int =frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
# presumably: text-guided image generation — required/batch parameters
_UpperCAmelCase : List[Any] =frozenset(["""prompt""", """negative_prompt"""])
_UpperCAmelCase : Dict =frozenset([])
# presumably: unconditional image variation
_UpperCAmelCase : int =frozenset(["""image"""])
_UpperCAmelCase : Tuple =frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : int =frozenset(["""image"""])
# presumably: text-guided image-to-image
_UpperCAmelCase : str =frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : int =frozenset(["""prompt""", """image""", """negative_prompt"""])
_UpperCAmelCase : Optional[int] =frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : Optional[int] =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_UpperCAmelCase : Optional[Any] =frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Optional[Any] =frozenset(["""image""", """mask_image"""])
# presumably: example-guided inpainting
_UpperCAmelCase : Union[str, Any] =frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""example_image""", """image""", """mask_image"""])
# presumably: class-conditional generation
_UpperCAmelCase : Any =frozenset(["""class_labels"""])
_UpperCAmelCase : List[Any] =frozenset(["""class_labels"""])
# presumably: unconditional generation (batch size only)
_UpperCAmelCase : int =frozenset(["""batch_size"""])
_UpperCAmelCase : str =frozenset([])
_UpperCAmelCase : str =frozenset(["""batch_size"""])
_UpperCAmelCase : Optional[Any] =frozenset([])
# presumably: text-to-audio generation
_UpperCAmelCase : Tuple =frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""prompt""", """negative_prompt"""])
# presumably: token-conditioned generation
_UpperCAmelCase : List[str] =frozenset(["""input_tokens"""])
_UpperCAmelCase : Optional[Any] =frozenset(["""input_tokens"""])
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_UpperCAmelCase : Optional[Any] =logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case__( UpperCAmelCase__, UpperCAmelCase__ ):
    """Holds the (optionally learnable) embeddings used for classifier-free
    sampling in VQ-Diffusion; when `learnable`, a zero tensor of shape
    (length, hidden_size) is wrapped in an nn.Parameter.

    NOTE(review): the assignments bind to ``lowerCAmelCase_`` while the
    attributes/arguments (``self.learnable``, ``__lowercase``) are read later —
    the original attribute bindings appear mangled; confirm upstream.
    """
    @register_to_config
    def __init__( self , __lowercase , __lowercase = None , __lowercase = None ) -> Tuple:
        super().__init__()
        lowerCAmelCase_ : str = learnable
        if self.learnable:
            # both dimensions are required to size the learnable embedding table
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            lowerCAmelCase_ : Dict = torch.zeros(__lowercase , __lowercase )
        else:
            lowerCAmelCase_ : str = None
        lowerCAmelCase_ : Union[str, Any] = torch.nn.Parameter(__lowercase )
class snake_case__( UpperCAmelCase__ ):
    """VQ-Diffusion pipeline: text-conditioned generation over discrete VQ-VAE
    latents with a transformer denoiser and classifier-free guidance.

    NOTE(review): as elsewhere in this file, many locals are bound to
    ``lowerCAmelCase_`` but read under other names — the original bindings
    appear mangled; confirm against the upstream diffusers implementation.
    """
    # component type declarations (populated by register_modules in __init__)
    SCREAMING_SNAKE_CASE__ : VQModel
    SCREAMING_SNAKE_CASE__ : CLIPTextModel
    SCREAMING_SNAKE_CASE__ : CLIPTokenizer
    SCREAMING_SNAKE_CASE__ : TransformeraDModel
    SCREAMING_SNAKE_CASE__ : LearnedClassifierFreeSamplingEmbeddings
    SCREAMING_SNAKE_CASE__ : VQDiffusionScheduler
    def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> str:
        """Register all pipeline components (vqvae, transformer, text encoder, ...)."""
        super().__init__()
        self.register_modules(
            vqvae=__lowercase , transformer=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> int:
        """Encode the prompt into (normalized) CLIP embeddings, duplicated per
        image, and prepend unconditional embeddings for classifier-free guidance.
        """
        lowerCAmelCase_ : Any = len(__lowercase ) if isinstance(__lowercase , __lowercase ) else 1
        # get prompt text embeddings
        lowerCAmelCase_ : Optional[Any] = self.tokenizer(
            __lowercase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
        lowerCAmelCase_ : Optional[int] = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            # warn about and drop tokens beyond CLIP's maximum sequence length
            lowerCAmelCase_ : Any = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            lowerCAmelCase_ : Dict = text_input_ids[:, : self.tokenizer.model_max_length]
        lowerCAmelCase_ : Optional[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        lowerCAmelCase_ : Any = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__lowercase )
        # duplicate text embeddings for each generation per prompt
        lowerCAmelCase_ : Optional[Any] = prompt_embeds.repeat_interleave(__lowercase , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                # use the learned unconditional embeddings
                lowerCAmelCase_ : Optional[Any] = self.learned_classifier_free_sampling_embeddings.embeddings
                lowerCAmelCase_ : Union[str, Any] = negative_prompt_embeds.unsqueeze(0 ).repeat(__lowercase , 1 , 1 )
            else:
                # otherwise encode the empty prompt as the unconditional branch
                lowerCAmelCase_ : Tuple = [''''''] * batch_size
                lowerCAmelCase_ : Optional[Any] = text_input_ids.shape[-1]
                lowerCAmelCase_ : Any = self.tokenizer(
                    __lowercase , padding='''max_length''' , max_length=__lowercase , truncation=__lowercase , return_tensors='''pt''' , )
                lowerCAmelCase_ : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                lowerCAmelCase_ : str = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__lowercase )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            lowerCAmelCase_ : Any = negative_prompt_embeds.shape[1]
            lowerCAmelCase_ : List[str] = negative_prompt_embeds.repeat(1 , __lowercase , 1 )
            lowerCAmelCase_ : List[str] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __lowercase , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowerCAmelCase_ : Dict = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds
    @torch.no_grad()
    def __call__( self , __lowercase , __lowercase = 1_0_0 , __lowercase = 5.0 , __lowercase = 1.0 , __lowercase = 1 , __lowercase = None , __lowercase = None , __lowercase = "pil" , __lowercase = True , __lowercase = None , __lowercase = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the full denoising loop and decode the final latents to images."""
        if isinstance(__lowercase , __lowercase ):
            lowerCAmelCase_ : Optional[Any] = 1
        elif isinstance(__lowercase , __lowercase ):
            lowerCAmelCase_ : List[str] = len(__lowercase )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__lowercase )}""" )
        lowerCAmelCase_ : Tuple = batch_size * num_images_per_prompt
        # guidance is enabled whenever the scale exceeds 1.0
        lowerCAmelCase_ : List[Any] = guidance_scale > 1.0
        lowerCAmelCase_ : str = self._encode_prompt(__lowercase , __lowercase , __lowercase )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(__lowercase , __lowercase ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(__lowercase )}.""" )
        # get the initial completely masked latents unless the user supplied it
        lowerCAmelCase_ : List[str] = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # the last vector-embedding index is the "masked" token
            lowerCAmelCase_ : Any = self.transformer.num_vector_embeds - 1
            lowerCAmelCase_ : Any = torch.full(__lowercase , __lowercase ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    '''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'''
                    f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
            lowerCAmelCase_ : List[Any] = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(__lowercase , device=self.device )
        lowerCAmelCase_ : Dict = self.scheduler.timesteps.to(self.device )
        lowerCAmelCase_ : Dict = latents
        for i, t in enumerate(self.progress_bar(__lowercase ) ):
            # expand the sample if we are doing classifier free guidance
            lowerCAmelCase_ : Tuple = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            lowerCAmelCase_ : int = self.transformer(__lowercase , encoder_hidden_states=__lowercase , timestep=__lowercase ).sample
            if do_classifier_free_guidance:
                # split into unconditional/conditional halves and recombine with guidance
                lowerCAmelCase_ : Optional[int] = model_output.chunk(2 )
                lowerCAmelCase_ : Optional[Any] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                # renormalize the log-probabilities
                model_output -= torch.logsumexp(__lowercase , dim=1 , keepdim=__lowercase )
            lowerCAmelCase_ : Optional[int] = self.truncate(__lowercase , __lowercase )
            # remove `log(0)`'s (`-inf`s)
            lowerCAmelCase_ : Any = model_output.clamp(-7_0 )
            # compute the previous noisy sample x_t -> x_t-1
            lowerCAmelCase_ : Dict = self.scheduler.step(__lowercase , timestep=__lowercase , sample=__lowercase , generator=__lowercase ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(__lowercase , __lowercase , __lowercase )
        # decode the final discrete latents through the VQ-VAE
        lowerCAmelCase_ : List[Any] = self.vqvae.config.vq_embed_dim
        lowerCAmelCase_ : Any = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        lowerCAmelCase_ : int = self.vqvae.quantize.get_codebook_entry(__lowercase , shape=__lowercase )
        lowerCAmelCase_ : Optional[Any] = self.vqvae.decode(__lowercase , force_not_quantize=__lowercase ).sample
        # map from [-1, 1] to [0, 1] and move channels last for numpy/PIL
        lowerCAmelCase_ : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 )
        lowerCAmelCase_ : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            lowerCAmelCase_ : Union[str, Any] = self.numpy_to_pil(__lowercase )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=__lowercase )
    def lowercase_ ( self , __lowercase , __lowercase ) -> torch.FloatTensor:
        """Zero out (set to log(0)) the lowest-probability classes so that the
        kept classes' cumulative probability stays below `truncation_rate`;
        the single most probable class is always kept.
        """
        lowerCAmelCase_ : int = torch.sort(__lowercase , 1 , descending=__lowercase )
        lowerCAmelCase_ : Dict = torch.exp(__lowercase )
        lowerCAmelCase_ : List[str] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        lowerCAmelCase_ : Tuple = torch.full_like(keep_mask[:, 0:1, :] , __lowercase )
        lowerCAmelCase_ : int = torch.cat((all_true, keep_mask) , dim=1 )
        lowerCAmelCase_ : Optional[Any] = keep_mask[:, :-1, :]
        # scatter the mask back to the original (unsorted) class order
        lowerCAmelCase_ : Dict = keep_mask.gather(1 , indices.argsort(1 ) )
        lowerCAmelCase_ : int = log_p_x_0.clone()
        lowerCAmelCase_ : List[str] = -torch.inf # -inf = log(0)
        return rv
def lowerCAmelCase ( lowerCAmelCase_ = 1_000_000 )-> int:
    """Project Euler 14: return the starting number below ``lowerCAmelCase_``
    that produces the longest Collatz chain.

    Chain lengths are memoized so each number's tail is walked only once.
    """
    chain_lengths = {1: 1}  # number -> Collatz chain length (counting both ends)
    largest_number = 1
    longest_chain = 1
    for start in range(2 , lowerCAmelCase_ ):
        steps = 0
        number = start
        # Walk until we reach a number whose chain length is already cached.
        while number not in chain_lengths:
            if number % 2 == 0:
                number //= 2
            else:
                number = (3 * number) + 1
            steps += 1
        total = steps + chain_lengths[number]
        # ``start`` strictly increases, so it can never already be cached
        # (the original guarded this with a redundant membership test).
        chain_lengths[start] = total
        if total > longest_chain:
            largest_number = start
            longest_chain = total
    return largest_number
if __name__ == "__main__":
    # Fix: the solver above is (machine-)named ``lowerCAmelCase``; the
    # original call referenced the undefined name ``solution``.
    print(lowerCAmelCase(int(input().strip())))
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Batch-size constants (values match the upstream accelerate example;
# presumably MAX_GPU_BATCH_SIZE / EVAL_BATCH_SIZE — TODO confirm).
# NOTE(review): both assignments share one machine-generated name, so the
# second shadows the first.
_UpperCAmelCase : List[str] =16
_UpperCAmelCase : Union[str, Any] =32
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
    """Convert a byte count to whole mebibytes, truncating toward zero.

    Fixes: the body read an undefined name ``x`` instead of the parameter,
    and the ``-> List[Any]`` annotation referenced un-imported typing names.
    """
    return int(lowerCAmelCase_ / 2**20 )
class snake_case__:
    """Context manager that measures CUDA memory consumed by its body.

    On exit, ``self.used`` / ``self.peaked`` hold the allocated delta and the
    peak delta (both in MB) relative to entry.
    """

    def __enter__( self ):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        # Fix: the original stored these readings in throwaway locals, so the
        # ``self.begin`` / ``self.end`` / ``self.peak`` reads below raised
        # AttributeError.
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__( self , *exc ):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        # bytes -> MB inline; the module's b2mb helper was machine-renamed and
        # is no longer resolvable by name.
        self.used = int((self.end - self.begin ) / 2**20 )
        self.peaked = int((self.peak - self.begin ) / 2**20 )
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def lowerCAmelCase ( accelerator , batch_size = 16 , model_name = "bert-base-cased" , n_train = 320 , n_val = 160 ):
    """Build train/eval DataLoaders over a slice of GLUE/MRPC.

    Fixes vs. the original: the signature declared five parameters that all
    shared one name (a SyntaxError), and intermediate results were bound to
    throwaway locals while later lines read the intended names
    (``tokenizer``, ``datasets``, ``tokenized_datasets``, ...).
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset(
        '''glue''' , '''mrpc''' , split={'''train''': f"""train[:{n_train}]""", '''validation''': f"""validation[:{n_val}]"""} )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def lowerCAmelCase ( config , args ):
    """Train for ``config['num_epochs']`` epochs while tracking per-epoch peak
    GPU memory; optionally asserts an upper bound and dumps the peaks to
    ``peak_memory_utilization.json``.

    Fixes vs. the original: both parameters shared one name (a SyntaxError)
    and most intermediates were bound to throwaway locals while later lines
    read the intended names (``config``, ``args``, ``model``, ``optimizer``, ...).
    """

    def _bamb(nbytes ):
        # bytes -> MB; local copy because the module-level b2mb helper was
        # machine-renamed and is no longer resolvable by name.
        return int(nbytes / 2**20 )

    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    model_name = args.model_name_or_path
    set_seed(seed )
    # NOTE(review): the dataloader builder above was machine-renamed; the
    # upstream name is kept here — reconcile with the actual definition.
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name , args.n_train , args.n_val )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer: use the DeepSpeed dummy when DeepSpeed owns the optimizer.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler (again, a dummy when DeepSpeed owns it).
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch , num_epochs ):
        # ``snake_case__`` is the CUDA-memory-tracking context manager defined
        # above (upstream name: TorchTracemalloc).
        with snake_case__() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('''Memory before entering the train : {}'''.format(_bamb(tracemalloc.begin ) ) )
        accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) )
        accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) )
        accelerator.print(
            '''Total Peak Memory consumed during the train (max): {}'''.format(
                tracemalloc.peaked + _bamb(tracemalloc.begin ) ) )
        train_total_peak_memory[f"""epoch-{epoch}"""] = tracemalloc.peaked + _bamb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , '''peak_memory_utilization.json''' ) , '''w''' ) as f:
            json.dump(train_total_peak_memory , f )
def lowerCAmelCase ( ):
    """Parse CLI args and launch the peak-memory-tracking training run.

    Fixes vs. the original: ``parser``/``args``/``config`` were bound to
    throwaway locals, and every ``type=``/``required=``/``default=`` keyword
    referenced an undefined name; restored from the argument semantics.
    """
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
    parser.add_argument(
        '''--model_name_or_path''' , type=str , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=False , )
    parser.add_argument(
        '''--output_dir''' , type=str , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--peak_memory_upper_bound''' , type=float , default=None , help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' , )
    parser.add_argument(
        '''--n_train''' , type=int , default=320 , help='''Number of training examples to use.''' , )
    parser.add_argument(
        '''--n_val''' , type=int , default=160 , help='''Number of validation examples to use.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=int , default=1 , help='''Number of train epochs.''' , )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    # NOTE(review): the training entry point above was machine-renamed; the
    # upstream name is kept here — reconcile with the actual definition.
    training_function(config , args )
if __name__ == "__main__":
    # Fix: the CLI entry point directly above is (machine-)named
    # ``lowerCAmelCase``; the original call referenced the undefined ``main``.
    lowerCAmelCase()
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : str =logging.get_logger(__name__)
class snake_case__( PretrainedConfig ):
    """Configuration for a composite encoder-decoder model.

    Holds one sub-config per side, rebuilt via ``AutoConfig`` from the
    ``encoder`` / ``decoder`` dicts passed to ``__init__``.
    NOTE(review): the base class and method names were mangled by tooling
    (undefined ``UpperCAmelCase__`` base; two methods both named
    ``lowercase_`` shadowing each other); restored from the in-file import
    and the bodies' own reads (``self.__class__.model_type`` in to_dict).
    """

    model_type = """encoder-decoder"""
    is_composition = True  # presumably the upstream flag — TODO confirm

    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )

        # Imported lazily to avoid a circular import with the auto mapping.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config , decoder_config , **kwargs ) -> PretrainedConfig:
        """Build a composite config, forcing decoder-mode flags on the decoder side."""
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )

    def to_dict( self ):
        """Serialize, expanding the nested encoder/decoder configs to plain dicts."""
        output = copy.deepcopy(self.__dict__ )
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : List[Any] =logging.get_logger(__name__)
_UpperCAmelCase : Tuple ={}
class snake_case__( PretrainedConfig ):
    """Configuration class for LLaMA models.

    NOTE(review): tooling collapsed the base class (undefined
    ``UpperCAmelCase__``), all ``__init__`` parameters into one duplicated
    name (a SyntaxError), and the attribute assignments into throwaway
    locals; restored from the in-file reads (``self.rope_scaling`` and
    ``self._rope_scaling_validation()``) and the upstream defaults.
    """

    model_type = """llama"""
    keys_to_ignore_at_inference = ["""past_key_values"""]

    def __init__( self , vocab_size=3_2_0_0_0 , hidden_size=4_0_9_6 , intermediate_size=1_1_0_0_8 , num_hidden_layers=3_2 , num_attention_heads=3_2 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2_0_4_8 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def _rope_scaling_validation( self ):
        """Validate that ``rope_scaling`` is ``{"type": ..., "factor": ...}``.

        Raises ValueError on a malformed dict, an unknown type, or a
        non-float factor <= 1.
        """
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            # Fix: the message previously named the fields `name`/`factor`,
            # contradicting the `type` key the checks below actually read.
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                f"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
from __future__ import annotations
from math import pi
def lowerCAmelCase ( inductance , frequency , reactance )-> dict[str, float]:
    """Given exactly one of the three quantities as 0, solve X_L = 2*pi*f*L
    for the missing one and return it as a single-entry dict.

    Raises ValueError unless exactly one argument is 0, or if any is
    negative. Fix: the original declared three parameters that all shared
    one name (a SyntaxError); names restored from the body's own reads.
    """
    if (inductance, frequency, reactance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if inductance < 0:
        raise ValueError('''Inductance cannot be negative''' )
    if frequency < 0:
        raise ValueError('''Frequency cannot be negative''' )
    if reactance < 0:
        raise ValueError('''Inductive reactance cannot be negative''' )
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        # Unreachable: the count(0) != 1 guard above already rejected this.
        raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
    # Run any docstring examples in this module as a smoke test.
    import doctest

    doctest.testmod()
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""", """False""" ) ) is not True, reason="""Skipping test because should only be run when releasing minor transformers version""", )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
    [
        {
            """framework""": """pytorch""",
            """script""": """run_glue.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.g4dn.xlarge""",
            """results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
        },
        {
            """framework""": """tensorflow""",
            """script""": """run_tf.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.g4dn.xlarge""",
            """results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
        },
    ] )
class snake_case__( unittest.TestCase ):
    """Single-node SageMaker training smoke test, parameterized per framework.

    NOTE(review): tooling renamed all four methods to ``lowercase_`` (so they
    shadowed each other) and dropped several local bindings; names restored
    from the in-file call ``self.create_estimator()``, the bodies' own reads,
    and unittest conventions.
    """

    def setUp( self ):
        if self.framework == "pytorch":
            # Fix: ``check`` previously referenced an undefined name.
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=True , )
        assert hasattr(self , '''env''' )

    def create_estimator( self , instance_count=1 ):
        """Create the HuggingFace estimator for this parameterized framework."""
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )

    def save_results_as_csv( self , job_name ):
        """Export the training-job metrics for ``job_name`` to a CSV file."""
        TrainingJobAnalytics(job_name ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )

    def test_single_node_gpu( self ):
        # create estimator
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , outfile )
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
class snake_case__( str , Enum ):
    """Names of the available learning-rate schedules.

    NOTE(review): tooling collapsed the base class (undefined
    ``UpperCAmelCase__``; upstream is a str-backed enum) and all seven member
    names into one; members restored from the ``SchedulerType.X`` references
    in the scheduler table below.
    """

    LINEAR = """linear"""
    COSINE = """cosine"""
    COSINE_WITH_RESTARTS = """cosine_with_restarts"""
    POLYNOMIAL = """polynomial"""
    CONSTANT = """constant"""
    CONSTANT_WITH_WARMUP = """constant_with_warmup"""
    PIECEWISE_CONSTANT = """piecewise_constant"""


# Backwards-compatible alias: the rest of this module refers to the enum by
# its upstream name.
SchedulerType = snake_case__
def lowerCAmelCase ( optimizer , last_epoch = -1 ):
    """Constant schedule: LR multiplier 1 at every step.

    Fix: the original declared two parameters sharing one name (SyntaxError).
    """
    return LambdaLR(optimizer , lambda _step : 1 , last_epoch=last_epoch )
def lowerCAmelCase ( optimizer , num_warmup_steps , last_epoch = -1 ):
    """Constant LR after a linear warmup from 0 over ``num_warmup_steps``.

    Fix: the original declared duplicate parameter names (SyntaxError) and
    the inner lambda read names that were never bound; restored from the
    body's own reads (``current_step`` / ``num_warmup_steps``).
    """

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0

    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def lowerCAmelCase ( optimizer , step_rules , last_epoch = -1 ):
    """Piecewise-constant LR from a rule string such as ``"1:10,0.1:20,0.01"``:
    multiplier 1 until step 10, then 0.1 until step 20, then 0.01 forever.

    Fix: duplicate parameter names (SyntaxError) and lost local bindings
    restored from the body's own reads (``rules_dict``, ``sorted_steps``, ...).
    """
    rules_dict = {}
    rule_list = step_rules.split(''',''' )
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(''':''' )
        steps = int(steps_str )
        value = float(value_str )
        rules_dict[steps] = value
    # Trailing rule has no step bound: it applies forever after.
    last_lr_multiple = float(rule_list[-1] )

    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def lowerCAmelCase ( optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    """Linear warmup to the base LR, then linear decay to 0 at
    ``num_training_steps``.

    Fix: duplicate parameter names (SyntaxError); names restored from the
    inner lambda's own reads.
    """

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def lowerCAmelCase ( optimizer , num_warmup_steps , num_training_steps , num_cycles = 0.5 , last_epoch = -1 ):
    """Linear warmup, then cosine decay over the remaining steps
    (``num_cycles`` waves; the default 0.5 decays monotonically to 0).

    Fix: duplicate parameter names (SyntaxError); names restored from the
    inner lambda's own reads.
    """

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def lowerCAmelCase ( optimizer , num_warmup_steps , num_training_steps , num_cycles = 1 , last_epoch = -1 ):
    """Linear warmup, then cosine decay with ``num_cycles`` hard restarts;
    0 after ``num_training_steps``.

    Fix: duplicate parameter names (SyntaxError); names restored from the
    inner lambda's own reads.
    """

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        # The modulo restarts the cosine wave ``num_cycles`` times.
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def lowerCAmelCase ( optimizer , num_warmup_steps , num_training_steps , lr_end=1e-7 , power=1.0 , last_epoch=-1 ):
    """Linear warmup, then polynomial decay from the optimizer's base LR down
    to ``lr_end`` (held constant after ``num_training_steps``).

    Fixes: duplicate parameter names (SyntaxError); names restored from the
    body's own reads; doubled word removed from the error message.
    """
    lr_init = optimizer.defaults['''lr''']
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer , lr_lambda , last_epoch )
# Maps each schedule name to the factory that builds it.
# NOTE(review): tooling machine-renamed the factory definitions above (all to
# ``lowerCAmelCase``) and the enum class, so the upstream names referenced
# here may no longer resolve — reconcile with the actual definitions.
_UpperCAmelCase : Union[str, Any] ={
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCAmelCase ( name , optimizer , step_rules = None , num_warmup_steps = None , num_training_steps = None , num_cycles = 1 , power = 1.0 , last_epoch = -1 , ):
    """Unified factory: dispatch ``name`` to the matching schedule builder,
    validating the arguments each schedule requires.

    Fix: the original signature reused one parameter name eight times
    (a SyntaxError); names restored from the body's keyword usage.
    """
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_UpperCAmelCase : Dict =logging.get_logger(__name__)
@dataclass
class snake_case__:
    """Quantization settings for bitsandbytes 8-bit / 4-bit model loading.

    NOTE(review): tooling collapsed ``load_in_8bit``/``load_in_4bit`` into a
    single name and renamed every method to ``lowercase_`` (shadowing each
    other); names restored from the in-file calls (``self.post_init()``,
    ``self.to_json_string()``, ``self.to_dict()``, ``self.to_diff_dict()``)
    and from the attribute names embedded in the validation error messages.
    """

    def __init__( self , load_in_8bit=False , load_in_4bit=False , llm_int8_threshold=6.0 , llm_int8_skip_modules=None , llm_int8_enable_fp32_cpu_offload=False , llm_int8_has_fp16_weight=False , bnb_4bit_compute_dtype=None , bnb_4bit_quant_type="fp4" , bnb_4bit_use_double_quant=False , **kwargs , ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        # Accept None (-> float32 default), a dtype name string, or a dtype.
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype , str ):
            self.bnb_4bit_compute_dtype = getattr(torch , bnb_4bit_compute_dtype )
        elif isinstance(bnb_4bit_compute_dtype , torch.dtype ):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''' )
        self.post_init()

    def post_init( self ):
        """Sanity-check every field's type; called from ``__init__``."""
        if not isinstance(self.llm_int8_threshold , float ):
            raise ValueError('''llm_int8_threshold must be a float''' )
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules , list ):
            raise ValueError('''llm_int8_skip_modules must be a list of strings''' )
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload , bool ):
            raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''' )
        if not isinstance(self.llm_int8_has_fp16_weight , bool ):
            raise ValueError('''llm_int8_has_fp16_weight must be a boolean''' )
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype , torch.dtype ):
            raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''' )
        if not isinstance(self.bnb_4bit_quant_type , str ):
            raise ValueError('''bnb_4bit_quant_type must be a string''' )
        if not isinstance(self.bnb_4bit_use_double_quant , bool ):
            raise ValueError('''bnb_4bit_use_double_quant must be a boolean''' )
        if self.load_in_4bit and not version.parse(importlib.metadata.version('''bitsandbytes''' ) ) >= version.parse(
            '''0.39.0''' ):
            raise ValueError(
                '''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''' )

    def is_quantizable( self ):
        # True when either quantization mode is enabled.
        # (Presumably the upstream method name — no in-file call grounds it.)
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method( self ):
        """Return the active method name ("llm_int8", "fp4", "nf4") or None."""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict( cls , config_dict , return_unused_kwargs , **kwargs ):
        """Build a config from ``config_dict``; apply and consume any matching
        ``kwargs``, optionally returning the leftovers."""
        config = cls(**config_dict )
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config , key ):
                setattr(config , key , value )
                to_remove.append(key )
        for key in to_remove:
            kwargs.pop(key , None )
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def lowercase_ ( self , json_file_path ):
        """Write the serialized config to ``json_file_path`` as JSON."""
        with open(json_file_path , '''w''' , encoding='''utf-8''' ) as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict , indent=2 , sort_keys=True ) + '''\n'''
            writer.write(json_string )

    def to_dict( self ) -> Dict[str, Any]:
        """Serialize to a plain dict, rendering the dtype as its short name."""
        output = copy.deepcopy(self.__dict__ )
        output['''bnb_4bit_compute_dtype'''] = str(output['''bnb_4bit_compute_dtype'''] ).split('''.''' )[1]
        return output

    def __repr__( self ) -> str:
        return f"""{self.__class__.__name__} {self.to_json_string()}"""

    def to_json_string( self , use_diff = True ) -> str:
        """JSON form; by default only the fields differing from the defaults."""
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict , indent=2 , sort_keys=True ) + "\n"

    def to_diff_dict( self ) -> Dict[str, Any]:
        """Dict of only the fields whose value differs from a default instance."""
        config_dict = self.to_dict()
        # get the default config dict (the class name here is the machine name
        # of upstream BitsAndBytesConfig, which the original referenced and
        # which no longer resolved)
        default_config_dict = snake_case__().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
from __future__ import annotations
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )-> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 | 0 |
def lowerCAmelCase ( lowerCAmelCase_ = 1_000 )-> int:
return sum(e for e in range(3 , lowerCAmelCase_ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(f"""{solution() = }""") | 715 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = """src/transformers"""

# This is to make sure the transformers module imported is the one in the repo.
# Fix: each constant below had been renamed to one shared mangled name while
# the functions further down read them by these names.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")

# Config classes whose docstrings legitimately lack a checkpoint link.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    """DecisionTransformerConfig""",
    """EncoderDecoderConfig""",
    """MusicgenConfig""",
    """RagConfig""",
    """SpeechEncoderDecoderConfig""",
    """TimmBackboneConfig""",
    """VisionEncoderDecoderConfig""",
    """VisionTextDualEncoderConfig""",
    """LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class) -> str:
    """Return the first checkpoint name whose docstring markdown link matches
    ``https://huggingface.co/<name>``, or None if no valid link is found.

    Fixes: restored the function name (the caller below uses it) and the
    locals, which had all been mangled to one undefined name.
    """
    checkpoint = None  # may stay None; the -> str annotation is kept from the original

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('''/''' ):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints() -> None:
    """Raise ValueError listing every config class whose docstring has no
    valid checkpoint link and is not explicitly exempted.

    Fixes: restored the function name (the ``__main__`` guard calls it) and
    the mangled locals, and pass the config class — not an undefined name —
    to `get_checkpoint_from_config_class`.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint) )
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
    # Entry point: raises ValueError naming any config missing a checkpoint link.
    check_config_docstrings_have_checkpoints()
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for ShapEImgaImgPipeline using tiny randomly-initialized models.

    Fixes: the obfuscated version used an undefined base class, gave every class
    attribute and method the same name (so only the last survived), and
    referenced ``self.dummy_*`` helpers that did not exist. Names restored to
    match the diffusers Shap-E img2img test conventions.
    """

    pipeline_class = ShapEImgaImgPipeline
    params = ["""image"""]
    batch_params = ["""image"""]
    required_optional_params = [
        """num_images_per_prompt""",
        """num_inference_steps""",
        """generator""",
        """latents""",
        """guidance_scale""",
        """frame_size""",
        """output_type""",
        """return_dict""",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        # Hidden size shared by the tiny CLIP encoder and the prior projection.
        return 3_2

    @property
    def time_input_dim(self):
        return 3_2

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=6_4,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=3_7,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        return CLIPVisionModel(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=2_2_4,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
            resample=3,
            size=2_2_4,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 1_6,
            '''embedding_dim''': self.time_input_dim,
            '''num_embeddings''': 3_2,
            '''embedding_proj_dim''': self.text_embedder_hidden_size,
            '''time_embed_dim''': self.time_embed_dim,
            '''num_layers''': 1,
            '''clip_embed_dim''': self.time_input_dim * 2,
            '''additional_embeddings''': 0,
            '''time_embed_act_fn''': '''gelu''',
            '''norm_in_type''': '''layer''',
            '''embedding_proj_norm_type''': '''layer''',
            '''encoder_hid_proj_type''': None,
            '''added_emb_type''': None,
        }
        return PriorTransformer(**model_kwargs)

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            '''param_shapes''': (
                (self.renderer_dim, 9_3),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            '''d_latent''': self.time_input_dim,
            '''d_hidden''': self.renderer_dim,
            '''n_output''': 1_2,
            '''background''': (
                0.1,
                0.1,
                0.1,
            ),
        }
        return ShapERenderer(**model_kwargs)

    def get_dummy_components(self):
        # Assemble the full set of tiny components the pipeline needs.
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='''exp''',
            num_train_timesteps=1_0_2_4,
            prediction_type='''sample''',
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            '''prior''': prior,
            '''image_encoder''': image_encoder,
            '''image_processor''': image_processor,
            '''renderer''': renderer,
            '''scheduler''': scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(seed)).to(device)
        # MPS does not support device-bound generators.
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''image''': input_image,
            '''generator''': generator,
            '''num_inference_steps''': 1,
            '''frame_size''': 3_2,
            '''output_type''': '''np''',
        }
        return inputs

    def test_shap_e(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (2_0, 3_2, 3_2, 3)
        expected_slice = np.array(
            [
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImgaImgPipelineIntegrationTests(unittest.TestCase):
    """GPU integration test comparing pipeline output to a stored reference.

    Fixes: renamed the class (it shadowed the fast-test class above) and the
    methods — `tearDown` must carry that name for `super().tearDown()` to be
    meaningful, and test methods must start with ``test_`` to be discovered.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_img2img_out.npy''' )
        pipe = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=6_4,
            frame_size=6_4,
            output_type='''np''',
        ).images[0]
        assert images.shape == (2_0, 6_4, 6_4, 3)
        assert_mean_pixel_difference(images, expected_image)
| 716 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState

# Silence PyTorch's "scheduler stepped before optimizer" UserWarning: the
# wrapper class below intentionally controls when the scheduler is stepped.
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class snake_case__:
    '''Wrapper around a learning-rate scheduler that cooperates with gradient
    accumulation and multi-process training.

    Fixes: `__init__` previously bound plain locals instead of attributes, so
    every method's `self.scheduler` / `self.optimizers` / flags were undefined,
    and all six delegating methods shared one name (only the last survived).
    The standard scheduler API names are restored.
    '''

    def __init__( self , scheduler , optimizers , step_with_optimizer = True , split_batches = False ) -> None:
        self.scheduler = scheduler
        # Normalize to a list so `step` can iterate uniformly.
        self.optimizers = optimizers if isinstance(optimizers , (list, tuple) ) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step( self , *args , **kwargs ) -> None:
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args , **kwargs )
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                # Keep the internal step count in sync even on skipped steps.
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args , **kwargs )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , '''total_steps''' ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args , **kwargs )
                else:
                    self.scheduler.step(*args , **kwargs )

    def get_last_lr( self ):
        return self.scheduler.get_last_lr()

    def state_dict( self ):
        return self.scheduler.state_dict()

    def load_state_dict( self , state_dict ) -> None:
        self.scheduler.load_state_dict(state_dict )

    def get_lr( self ):
        return self.scheduler.get_lr()

    def print_lr( self , *args , **kwargs ):
        return self.scheduler.print_lr(*args , **kwargs )
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    """Holds the parameters used to build a DonutImageProcessor for tests.

    Fixes: the obfuscated `__init__` declared every parameter as the same
    mangled name (a SyntaxError) while the body assigned from the real names;
    the class is instantiated below as ``DonutImageProcessingTester(self)``,
    so that name is restored too.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=1_8,
        min_resolution=3_0,
        max_resolution=4_0_0,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # kept as a default list for signature compatibility; never mutated
        image_std=[0.5, 0.5, 0.5],
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'''height''': 1_8, '''width''': 2_0}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        # Kwargs dict consumed by DonutImageProcessor(**...) in the tests below.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Standard image-processor test battery for DonutImageProcessor.

    Fixes: the obfuscated class used an undefined base name and gave every
    method the same name, so no test was discoverable; names restored to the
    transformers image-processor test conventions.
    """

    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
        self.assertTrue(hasattr(image_processing, '''do_thumbnail'''))
        self.assertTrue(hasattr(image_processing, '''do_align_long_axis'''))
        self.assertTrue(hasattr(image_processing, '''do_pad'''))
        self.assertTrue(hasattr(image_processing, '''do_normalize'''))
        self.assertTrue(hasattr(image_processing, '''image_mean'''))
        self.assertTrue(hasattr(image_processing, '''image_std'''))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''height''': 1_8, '''width''': 2_0})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=4_2)
        self.assertEqual(image_processor.size, {'''height''': 4_2, '''width''': 4_2})
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(4_2, 8_4))
        self.assertEqual(image_processor.size, {'''height''': 8_4, '''width''': 4_2})

    def test_batch_feature(self):
        # presumably the intentionally-empty placeholder from the template — confirm
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )
from manim import *
class snake_case__( UpperCAmelCase__ ):
    '''Manim scene animating how model weights are loaded: CPU, GPU, model and
    checkpoint blocks are drawn, then checkpoint cells are moved to disk and
    faded out (accelerate's big-model-inference animation).

    NOTE(review): this block was machine-obfuscated — most assignment targets
    were renamed while later references keep the original names (`mem`, `fill`,
    `cpu_left_col_base`, ...), and several `__lowercase` arguments replaced
    manim direction constants (UP/DOWN/LEFT/RIGHT) whose original values cannot
    be recovered from this file alone. The code is therefore left byte-identical
    and only annotated; it will not run until the names are restored from the
    upstream accelerate manim script.
    '''
    def lowercase_ ( self ) -> Tuple:
        # Base rectangles: a full memory cell, a small "meta" cell, and a fill overlay.
        lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU: two 6-cell columns plus a label.
        lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 )
        lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__lowercase )
        # GPU: a 4-cell row plus a label.
        lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(__lowercase )
        # Model: a 6-cell row plus a label.
        lowerCAmelCase_ : str = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 )
        lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(__lowercase )
        # Overlay small fill targets next to the CPU cells for each model cell.
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : Dict = []
        for i, rect in enumerate(__lowercase ):
            rect.set_stroke(__lowercase )
            lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
            self.add(__lowercase )
            model_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase , *__lowercase )
        # Loaded checkpoint: another 6-cell row with its label.
        lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__lowercase )
        # Fill the checkpoint cells and mirror them onto the CPU columns.
        lowerCAmelCase_ : Optional[Any] = []
        lowerCAmelCase_ : Dict = []
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 )
            target.move_to(__lowercase )
            ckpt_arr.append(__lowercase )
            lowerCAmelCase_ : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase )
        # Legend explaining the color coding.
        lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase_ : str = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__lowercase , __lowercase )
        lowerCAmelCase_ : str = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
        blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__lowercase )
        # Step 1 caption, then draw the disk group.
        lowerCAmelCase_ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 )
        lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
        # Animate moving checkpoint cells onto the disk, then fade and caption step 2.
        lowerCAmelCase_ : int = []
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : int = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
        self.play(*__lowercase )
        self.play(FadeOut(__lowercase ) )
        lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__lowercase , run_time=3 ) )
        self.play(
            FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
        self.wait()
import os
import sys
import unittest


# Fix: the repo root path was assigned to a mangled name while the lines below
# read `git_repo_path`; restored so the utils directory is importable.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align PATH_TO_DIFFUSERS in check_dummies with the current path
# (presumably the original target, given the diffusers paths below — confirm)
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, """src""", """diffusers""")
class CheckDummiesTester(unittest.TestCase):
    """Unit tests for the `check_dummies` repo utility.

    Fixes: all four test methods shared one mangled name (only the last was
    discovered) and the locals were undefined; names restored. Runtime string
    literals are reproduced exactly as found.
    """

    def test_find_backend(self):
        simple_backend = find_backend(''' if not is_torch_available():''' )
        self.assertEqual(simple_backend , '''torch''' )
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
        self.assertEqual(double_backend , '''torch_and_transformers''' )
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            ''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
        self.assertEqual(triple_backend , '''torch_and_transformers_and_onnx''' )

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('''torch''' , objects )
        self.assertIn('''torch_and_transformers''' , objects )
        self.assertIn('''flax_and_transformers''' , objects )
        self.assertIn('''torch_and_transformers_and_onnx''' , objects )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('''UNet2DModel''' , objects['''torch'''] )
        self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
        self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
        self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
        self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
        self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
        self.assertEqual(dummy_constant , '''\nCONSTANT = None\n''' )

        dummy_function = create_dummy_object('''function''' , '''\'torch\'''' )
        self.assertEqual(
            dummy_function , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )

        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
 _backends = \'torch\'

 def __init__(self, *args, **kwargs):
 requires_backends(self, \'torch\')

 @classmethod
 def from_config(cls, *args, **kwargs):
 requires_backends(cls, \'torch\')

 @classmethod
 def from_pretrained(cls, *args, **kwargs):
 requires_backends(cls, \'torch\')
'''
        dummy_class = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
        self.assertEqual(dummy_class , expected_dummy_class )

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
 requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
 _backends = ["torch"]

 def __init__(self, *args, **kwargs):
 requires_backends(self, ["torch"])

 @classmethod
 def from_config(cls, *args, **kwargs):
 requires_backends(cls, ["torch"])

 @classmethod
 def from_pretrained(cls, *args, **kwargs):
 requires_backends(cls, ["torch"])
'''
        dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
        self.assertEqual(dummy_files['''torch'''] , expected_dummy_pytorch_file )
# (value, symbol) pairs in descending order, including the subtractive forms.
# Fix: the table was assigned to a mangled name while `int_to_roman` below
# iterates over `ROMAN`.
ROMAN = [
    (1000, """M"""),
    (900, """CM"""),
    (500, """D"""),
    (400, """CD"""),
    (100, """C"""),
    (90, """XC"""),
    (50, """L"""),
    (40, """XL"""),
    (10, """X"""),
    (9, """IX"""),
    (5, """V"""),
    (4, """IV"""),
    (1, """I"""),
]
def roman_to_int(roman: str) -> int:
    """Convert a roman numeral string (e.g. ``"XIV"``) to an integer.

    Walks the string left to right; a symbol smaller than its successor is
    subtractive (IV, IX, XL, ...), otherwise additive.

    Fixes: the parameter was mangled while the body indexed `roman`, and this
    function shared its name with `int_to_roman` below (shadowing).
    """
    vals = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            # Subtractive pair, e.g. IV -> 5 - 1.
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number: int) -> str:
    """Convert a positive integer to its roman numeral string.

    Greedily consumes the descending (value, symbol) pairs of the module-level
    `ROMAN` table via divmod.

    Fixes: locals were mangled (`divmod` was called on one undefined name twice)
    and the function shadowed `roman_to_int` above.
    """
    result = []
    for arabic, roman in ROMAN:
        # factor = how many times this symbol fits; number = remainder.
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
import math
import sys


def lowerCAmelCase(number: int) -> int:
    """Return the minimum count of perfect squares that sum to `number`
    (dynamic programming over 1..number).

    Raises:
        ValueError: if `number` is not a natural number (non-integral or negative).

    Note: ``number == 0`` returns 1, preserved from the original implementation.

    Fixes: the parameter and the DP locals (`answers`, `answer`, `root`) had
    been mangled to undefined names.
    """
    if number != int(number):
        raise ValueError('''the value of input must be a natural number''' )
    if number < 0:
        raise ValueError('''the value of input must not be a negative number''' )
    if number == 0:
        return 1
    # answers[i] = minimum number of squares summing to i; -1 = not yet computed.
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            # Use one square j**2 plus the best solution for the remainder.
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
import csv

import tweepy

# Twitter API credentials (fill in before running).
# Fix: all four constants were assigned to one mangled name while
# `get_all_tweets` below reads them individually.
consumer_key = """"""
consumer_secret = """"""
access_key = """"""
access_secret = """"""
def get_all_tweets(screen_name: str) -> None:
    """Download a user's recent timeline via the Twitter API and write it to
    ``new_<screen_name>_tweets.csv`` (columns: id, created_at, text).

    Fixes: restored the function name (the ``__main__`` guard calls
    `get_all_tweets`) and the locals, which were all mangled to one name.
    """
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"""getting tweets before {oldest}""" )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"""...{len(alltweets)} tweets downloaded so far""" )

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"""new_{screen_name}_tweets.csv""", '''w''' ) as f:
        writer = csv.writer(f)
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(outtweets)
if __name__ == "__main__":
    # pass in the username of the account you want to download
    # NOTE(review): requires a module-level `get_all_tweets` function.
    get_all_tweets("""FirePing32""")
def lowerCAmelCase ( lowerCAmelCase_=28_123 )-> int:
    """Project Euler 23: sum of all positive integers <= ``lowerCAmelCase_``
    that cannot be written as the sum of two abundant numbers.

    :param lowerCAmelCase_: inclusive upper limit (default 28123, above which
        every integer is known to be expressible).
    :return: the sum of the non-expressible integers.
    """
    limit = lowerCAmelCase_
    # sum_divs[n] accumulates the sum of the proper divisors of n
    # (1 divides everything, hence the initial 1s)
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i  # i pairs with itself: count it once
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i  # divisor pair (i, k)
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            # BUG FIX: the original added the *limit* to the set instead of n
            abundants.add(n)
        # n is expressible iff n - a is abundant for some abundant a <= n
        if not any((n - a) in abundants for a in abundants):
            res += n
    return res
if __name__ == "__main__":
    # NOTE(review): the solver above is defined under the mangled name
    # ``lowerCAmelCase``; calling it directly avoids the NameError the original
    # raised on the undefined ``solution``.
    print(lowerCAmelCase())
from math import sqrt
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
    """Return True iff ``lowerCAmelCase_`` is prime (trial division up to sqrt)."""
    number = lowerCAmelCase_
    # input contract: a non-negative int (the mangled original compared the
    # value against itself in isinstance, a TypeError)
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def lowerCAmelCase ( lowerCAmelCase_ )-> list:
    """Sieve of Eratosthenes: return all primes from 2 up to and including
    ``lowerCAmelCase_`` (must be an int > 2)."""
    n = lowerCAmelCase_
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes: zero out every multiple of a survivor
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> list:
    """Return every prime from 2 up to and including ``lowerCAmelCase_`` via
    trial division.

    NOTE(review): relies on a module-level ``is_prime`` helper whose name was
    mangled in this file — confirm it resolves in the original module.
    """
    n = lowerCAmelCase_
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> list:
    """Return the prime factorization of ``lowerCAmelCase_`` as an ascending list
    (0 and 1 map to [0] / [1]).

    NOTE(review): relies on the sibling ``is_prime`` helper whose name was
    mangled in this file — confirm it resolves in the original module.
    """
    number = lowerCAmelCase_
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # integer division: keeps the quotient an exact int instead of
                # drifting into floats as the original '/=' did
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
    """Return the greatest prime factor of ``lowerCAmelCase_``.

    NOTE(review): relies on the sibling ``prime_factorization`` helper whose
    name was mangled in this file.
    """
    number = lowerCAmelCase_
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
    """Return the smallest prime factor of ``lowerCAmelCase_``.

    NOTE(review): relies on the sibling ``prime_factorization`` helper whose
    name was mangled in this file.
    """
    number = lowerCAmelCase_
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
    """Return True iff ``lowerCAmelCase_`` is even."""
    number = lowerCAmelCase_
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
    """Return True iff ``lowerCAmelCase_`` is odd."""
    number = lowerCAmelCase_
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0
def lowerCAmelCase ( lowerCAmelCase_ )-> list:
    """Goldbach: return two *distinct* primes whose sum is the even number
    ``lowerCAmelCase_`` (> 2); the closing assert fails if none exist.

    NOTE(review): relies on sibling helpers ``is_even``, ``get_prime_numbers``
    and ``is_prime`` whose names were mangled in this file.
    """
    number = lowerCAmelCase_
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1  # j > i, so the two primes are always distinct
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def lowerCAmelCase ( numbera , numberb )-> int:
    """Euclidean algorithm: greatest common divisor of two non-negative ints.

    NOTE(review): the mangled signature declared the same parameter name twice
    (a SyntaxError); the two operands are restored here.
    """
    assert (
        isinstance(numbera, int)
        and isinstance(numberb, int)
        and (numbera >= 0)
        and (numberb >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while numberb != 0:
        rest = numbera % numberb
        numbera = numberb
        numberb = rest
    # precondition
    assert isinstance(numbera, int) and (
        numbera >= 0
    ), "'number' must been from type int and positive"
    return numbera
def lowerCAmelCase ( numbera , numberb )-> int:
    """Least common multiple (kgV) of two positive ints, built from their
    prime factorizations.

    NOTE(review): the mangled signature declared the same parameter name twice
    (a SyntaxError); also relies on the sibling ``prime_factorization`` helper
    whose name was mangled in this file.
    """
    assert (
        isinstance(numbera, int)
        and isinstance(numberb, int)
        and (numbera >= 1)
        and (numberb >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if numbera > 1 and numberb > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(numbera)
        prime_fac_b = prime_factorization(numberb)
    elif numbera == 1 or numberb == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(numbera, numberb)
    count_a = 0
    count_b = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1: take each prime to its highest multiplicity
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n)
                count_b = prime_fac_b.count(n)
                for _ in range(max(count_a, count_b)):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n)
                for _ in range(count_a):
                    ans *= n
            done.append(n)
    # iterates through primeFac2: primes that only divide 'number2'
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n)
            for _ in range(count_b):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
    """Return the n-th prime, 0-indexed from 2 (n=0 -> 2, n=1 -> 3, ...).

    NOTE(review): relies on the sibling ``is_prime`` helper whose name was
    mangled in this file.
    """
    n = lowerCAmelCase_
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def lowerCAmelCase ( p_number_a , p_number_b )-> list:
    """Return the primes strictly between the two prime bounds
    ``p_number_a`` < ``p_number_b`` (both excluded).

    NOTE(review): the mangled signature declared the same parameter name twice
    (a SyntaxError); also relies on the sibling ``is_prime`` helper whose name
    was mangled in this file.
    """
    assert (
        is_prime(p_number_a) and is_prime(p_number_b) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_b:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_a
        and ans[len(ans) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> list:
    """Return all divisors of ``lowerCAmelCase_`` (including 1 and n itself),
    in ascending order."""
    n = lowerCAmelCase_
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
    """Return True iff ``lowerCAmelCase_`` equals the sum of its proper divisors.

    NOTE(review): relies on the sibling ``get_divisors`` helper whose name was
    mangled in this file.
    """
    number = lowerCAmelCase_
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def lowerCAmelCase ( numerator , denominator )-> tuple:
    """Reduce the fraction numerator/denominator by their gcd and return the
    pair (reduced_numerator, reduced_denominator).

    NOTE(review): the mangled signature declared the same parameter name twice
    (a SyntaxError); also relies on the sibling ``gcd`` helper whose name was
    mangled in this file.
    """
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
    """Return n! for a non-negative int (0! == 1)."""
    n = lowerCAmelCase_
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
    """Return the n-th Fibonacci number with fib(0) == fib(1) == 1."""
    n = lowerCAmelCase_
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fiba = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fiba
        fiba = tmp
    return ans
import pytest
_UpperCAmelCase : List[str] ="""__dummy_dataset1__"""
_UpperCAmelCase : Dict ="""
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def lowerCAmelCase ( )-> str:
    """Fixture: the dummy dataset's module name.

    NOTE(review): the original returned the module constant
    ``DATASET_LOADING_SCRIPT_NAME``, whose name was destroyed by the mangling
    above; returning the literal value restores the intent instead of raising
    NameError.
    """
    return "__dummy_dataset1__"
@pytest.fixture
def lowerCAmelCase ( )-> str:
    # Fixture: source code of the dummy dataset loading script.
    # NOTE(review): ``DATASET_LOADING_SCRIPT_CODE`` is not defined in this
    # mangled module (the constant above lost its name), so using this fixture
    # raises NameError — confirm the constant name against the original file.
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def lowerCAmelCase ( dataset_loading_script_name , dataset_loading_script_code , tmp_path )-> str:
    """Write the dummy dataset script to ``tmp_path/datasets/<name>/<name>.py``
    and return the directory that contains it.

    NOTE(review): the mangled signature declared the same parameter name three
    times (a SyntaxError).  The three pytest-injected fixtures are restored
    here — confirm the exact names against the original file, since pytest
    resolves fixtures by parameter name.
    """
    script_name = dataset_loading_script_name
    script_dir = tmp_path / '''datasets''' / script_name
    # parents=True so the intermediate "datasets" directory is created too
    # (the mangled code passed a path object here, which only worked by
    # accident of truthiness)
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"""{script_name}.py"""
    with open(script_path, '''w''') as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
# NOTE(review): the search functions below read this value under the name
# ``precision``, which was lost in the mangling — confirm against the
# original file.
_UpperCAmelCase : Tuple =10
def lowerCAmelCase ( left , right , array , target )-> int:
    """Linear scan over ``array[left:right]`` (right exclusive); return the
    index of ``target`` or -1 if absent.

    NOTE(review): the mangled signature repeated one parameter name four times
    (a SyntaxError); the four arguments are restored from the call sites below.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def lowerCAmelCase ( array , target )-> int:
    """Iterative ternary search on the sorted ``array``; return the index of
    ``target`` or -1.

    Falls back to ``lin_search`` once the window shrinks below the
    module-level ``precision`` threshold.

    NOTE(review): ``lin_search`` and ``precision`` are sibling names whose
    definitions were mangled in this file — confirm they resolve in the
    original module.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        # two probe points that split the window into three parts
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1
def lowerCAmelCase ( left , right , array , target )-> int:
    """Recursive ternary search on ``array[left..right]``; return the index of
    ``target`` or -1.

    NOTE(review): the mangled signature repeated one parameter name four times
    (a SyntaxError); also ``rec_ternary_search`` / ``lin_search`` /
    ``precision`` are sibling names lost in the mangling — confirm they
    resolve in the original module.
    """
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        # two probe points that split the window into three parts
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Interactive driver: read a sorted comma-separated list and a target,
    # then run both search variants.
    # NOTE(review): the mangling collapsed every result/input variable onto
    # one constant name, so the later reads of ``user_input``, ``collection``,
    # ``target`` and ``resulta`` (both results share it) do not resolve, and
    # ``ite_ternary_search`` / ``rec_ternary_search`` are not defined under
    # those names either — confirm against the original file.
    _UpperCAmelCase : Tuple =input("""Enter numbers separated by comma:\n""").strip()
    _UpperCAmelCase : Union[str, Any] =[int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    _UpperCAmelCase : int =int(input("""Enter the number to be found in the list:\n""").strip())
    _UpperCAmelCase : Optional[Any] =ite_ternary_search(collection, target)
    _UpperCAmelCase : List[str] =rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(f"""Iterative search: {target} found at positions: {resulta}""")
        print(f"""Recursive search: {target} found at positions: {resulta}""")
    else:
        print("""Not found""")
import re
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
    """Return True iff ``lowerCAmelCase_`` is a valid Indian mobile number.

    Accepts an optional "+91" (with optional '-'/space separator), an optional
    leading "0" and/or "91", then ten digits starting with 7, 8 or 9.
    """
    phone = lowerCAmelCase_
    pat = re.compile(r'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''')
    # the mangled original searched the pattern against itself and compared
    # against an undefined name; search the *phone* with the compiled pattern
    if match := re.search(pat, phone):
        # pattern is ^...$-anchored, so a match implies the whole string matched
        return match.string == phone
    return False
if __name__ == "__main__":
    # NOTE(review): the validator above is defined under the mangled name
    # ``lowerCAmelCase``; calling it directly avoids the NameError the original
    # raised on the undefined ``indian_phone_validator``.
    print(lowerCAmelCase("""+918827897895"""))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: maps submodule name -> the public names it exports.
# NOTE(review): the mangling collapsed every assignment below onto one
# constant, clobbering the structure and leaving ``_import_structure``
# undefined at the bottom; the canonical transformers lazy-init pattern is
# restored here — confirm the submodule keys against the original file.
_import_structure = {
    """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_llama"""] = ["""LlamaTokenizer"""]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_llama_fast"""] = ["""LlamaTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_llama"""] = [
        """LlamaForCausalLM""",
        """LlamaModel""",
        """LlamaPreTrainedModel""",
        """LlamaForSequenceClassification""",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; mirrors _import_structure above.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
# Emit conversion progress at INFO level when this script runs.
logging.set_verbosity_info()
def lowerCAmelCase ( params , i , prefix ):
    """Return the relative-position bias embedding of layer ``i`` from a
    flattened T5X checkpoint dict.

    NOTE(review): the mangled signature repeated one parameter name three
    times (a SyntaxError); the arguments (checkpoint dict, layer index, tree
    prefix) are restored from the call sites below.
    """
    return params[f"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def lowerCAmelCase ( params , i , prefix , layer_name="attention" ):
    """Return the (k, o, q, v) projection matrices of attention block ``i``,
    each flattened from its (dim, heads, head_dim) checkpoint layout to 2-D.

    NOTE(review): the mangled signature repeated one parameter name four times
    (a SyntaxError); the arguments are restored from the call sites below.
    """
    k_tmp = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :])
    # the "out" kernel folds (heads, head_dim) on the *input* side instead
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def lowerCAmelCase ( params , i , prefix , split_mlp_wi=False ):
    """Return the MLP weights (wi, wo) of block ``i``; ``wi`` is a (wi_0, wi_1)
    pair when the checkpoint uses a gated activation.

    NOTE(review): the mangled signature repeated one parameter name four times
    (a SyntaxError); the arguments are restored from the call sites below.
    """
    if split_mlp_wi:
        wi_a = params[f"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
        wi_b = params[f"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
        wi = (wi_a, wi_b)
    else:
        wi = params[f"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
    wo = params[f"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
    return wi, wo
def lowerCAmelCase ( params , i , prefix , layer_name ):
    """Return the layer-norm scale vector of block ``i`` for ``layer_name``.

    NOTE(review): the mangled signature repeated one parameter name four times
    (a SyntaxError); the arguments are restored from the call sites below.
    """
    return params[f"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
# Converts a flattened T5X "target" parameter tree into an HF-style ordered
# dict of transposed numpy weights (encoder, optional decoder, lm head).
# NOTE(review): this function is unrecoverable as mangled — the keyword-only
# signature repeats one parameter name (a SyntaxError), every assignment lost
# its ``new[...]`` / tuple-unpack target, and the dict comprehension joins the
# wrong variable — the original HF conversion script must be consulted to
# restore the destination key strings; only annotations are added here.
def lowerCAmelCase ( lowerCAmelCase_ , *, lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False )-> Dict:
    # flatten the nested "target" tree, then re-key it with "/"-joined paths
    # (NOTE(review): the join should consume ``k``, not the mangled name)
    lowerCAmelCase_ : Optional[int] = traverse_util.flatten_dict(variables['''target'''] )
    lowerCAmelCase_ : List[Any] = {'''/'''.join(lowerCAmelCase_ ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    lowerCAmelCase_ : Dict = '''encoder/encoder/mlp/wi_0/kernel''' in old
    print('''Split MLP:''' , lowerCAmelCase_ )
    lowerCAmelCase_ : Dict = collections.OrderedDict()
    # Shared embeddings.
    lowerCAmelCase_ : Union[str, Any] = old['''token_embedder/embedding''']
    # Encoder.
    for i in range(lowerCAmelCase_ ):
        # Block i, layer 0 (Self Attention).
        lowerCAmelCase_ : Tuple = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''encoder''' , '''pre_attention_layer_norm''' )
        lowerCAmelCase_ : Tuple = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''encoder''' , '''attention''' )
        lowerCAmelCase_ : Any = layer_norm
        # projection kernels are stored transposed relative to torch Linear
        lowerCAmelCase_ : int = k.T
        lowerCAmelCase_ : Tuple = o.T
        lowerCAmelCase_ : Dict = q.T
        lowerCAmelCase_ : Union[str, Any] = v.T
        # Block i, layer 1 (MLP).
        lowerCAmelCase_ : Any = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''encoder''' , '''pre_mlp_layer_norm''' )
        lowerCAmelCase_ : List[str] = tax_mlp_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''encoder''' , lowerCAmelCase_ )
        lowerCAmelCase_ : Union[str, Any] = layer_norm
        if split_mlp_wi:
            lowerCAmelCase_ : Optional[Any] = wi[0].T
            lowerCAmelCase_ : Optional[int] = wi[1].T
        else:
            lowerCAmelCase_ : int = wi.T
        lowerCAmelCase_ : Dict = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            lowerCAmelCase_ : Dict = tax_relpos_bias_lookup(
                lowerCAmelCase_ , lowerCAmelCase_ , '''encoder''' ).T
    lowerCAmelCase_ : Tuple = old['''encoder/encoder_norm/scale''']
    if not scalable_attention:
        # non-scalable models share one relpos bias taken from layer 0
        lowerCAmelCase_ : Dict = tax_relpos_bias_lookup(
            lowerCAmelCase_ , 0 , '''encoder''' ).T
        lowerCAmelCase_ : Any = tax_relpos_bias_lookup(
            lowerCAmelCase_ , 0 , '''decoder''' ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(lowerCAmelCase_ ):
            # Block i, layer 0 (Self Attention).
            lowerCAmelCase_ : Optional[int] = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''decoder''' , '''pre_self_attention_layer_norm''' )
            lowerCAmelCase_ : Optional[Any] = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''decoder''' , '''self_attention''' )
            lowerCAmelCase_ : Optional[Any] = layer_norm
            lowerCAmelCase_ : str = k.T
            lowerCAmelCase_ : Optional[Any] = o.T
            lowerCAmelCase_ : Optional[Any] = q.T
            lowerCAmelCase_ : int = v.T
            # Block i, layer 1 (Cross Attention).
            lowerCAmelCase_ : Union[str, Any] = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''decoder''' , '''pre_cross_attention_layer_norm''' )
            lowerCAmelCase_ : str = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''decoder''' , '''encoder_decoder_attention''' )
            lowerCAmelCase_ : Tuple = layer_norm
            lowerCAmelCase_ : List[str] = k.T
            lowerCAmelCase_ : Dict = o.T
            lowerCAmelCase_ : int = q.T
            lowerCAmelCase_ : int = v.T
            # Block i, layer 2 (MLP).
            lowerCAmelCase_ : Any = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''decoder''' , '''pre_mlp_layer_norm''' )
            lowerCAmelCase_ : Union[str, Any] = tax_mlp_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''decoder''' , lowerCAmelCase_ )
            lowerCAmelCase_ : Dict = layer_norm
            if split_mlp_wi:
                lowerCAmelCase_ : List[str] = wi[0].T
                lowerCAmelCase_ : Union[str, Any] = wi[1].T
            else:
                lowerCAmelCase_ : Union[str, Any] = wi.T
            lowerCAmelCase_ : Union[str, Any] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                lowerCAmelCase_ : int = tax_relpos_bias_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''decoder''' ).T
        lowerCAmelCase_ : int = old['''decoder/decoder_norm/scale''']
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            lowerCAmelCase_ : Union[str, Any] = old['''decoder/logits_dense/kernel'''].T
    return new
def lowerCAmelCase ( converted_params , is_encoder_only ):
    """Wrap converted numpy arrays into torch tensors and fill in the tied
    embedding / lm-head weights when they are absent from the checkpoint.

    NOTE(review): the mangled signature declared the same parameter name twice
    (a SyntaxError); the two arguments are restored from the call site below.
    """
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('''Using shared word embeddings as lm_head.''')
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def lowerCAmelCase ( model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ):
    """Load a T5X checkpoint, convert it, and load it into ``model``.

    NOTE(review): the mangled signature repeated one parameter name five times
    (a SyntaxError); the arguments are restored from the call site below.
    Also relies on the sibling conversion helpers (``convert_tax_to_pytorch``,
    ``make_state_dict``) whose names were mangled in this file.
    """
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    # strict=True so any missing/unexpected key aborts the conversion
    # (NOTE(review): the mangled code passed an unrelated flag here — confirm)
    model.load_state_dict(state_dict, strict=True)
def lowerCAmelCase ( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only = False , scalable_attention = False , ):
    """Build an (U)MT5 model from ``config_file``, fill it with the converted
    T5X weights, and save it to ``pytorch_dump_path``.

    NOTE(review): the mangled signature repeated one parameter name five times
    (a SyntaxError); the arguments are restored from the argparse driver
    below.  Also relies on the sibling ``load_tax_weights_in_ta`` helper whose
    name was mangled in this file.
    """
    config = MTaConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('''Done''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
    # Required parameters
    parser.add_argument(
        """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
    )
    parser.add_argument(
        """--scalable_attention""",
        action="""store_true""",
        help="""Whether the model uses scaled attention (umt5 model)""",
        default=False,
    )
    args = parser.parse_args()
    # NOTE(review): argparse exposes --t5x_checkpoint_path as
    # ``args.t5x_checkpoint_path``; the mangled code read
    # ``args.tax_checkpoint_path``, an AttributeError.  The converter itself is
    # defined above under a mangled name — confirm the call target against the
    # original file.
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
# Module-level flag; unused in the visible tests — presumably gated an
# optional behavior in the original test file (its name was mangled).
# TODO(review): confirm its original name/purpose against the un-mangled file.
_UpperCAmelCase : Any =False
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase_ ( self ) -> Union[str, Any]:
return 1_2
@property
def lowercase_ ( self ) -> Any:
return 1_2
@property
def lowercase_ ( self ) -> Optional[Any]:
return 3_2
@property
def lowercase_ ( self ) -> int:
torch.manual_seed(0 )
lowerCAmelCase_ : Any = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def lowercase_ ( self ) -> int:
torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(__lowercase )
@property
def lowercase_ ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = 1_2
lowerCAmelCase_ : int = 1_2
lowerCAmelCase_ : Union[str, Any] = {
'''attention_bias''': True,
'''cross_attention_dim''': 3_2,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 3_2,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
lowerCAmelCase_ : List[str] = TransformeraDModel(**__lowercase )
return model
def lowercase_ ( self ) -> str:
lowerCAmelCase_ : List[Any] = '''cpu'''
lowerCAmelCase_ : Any = self.dummy_vqvae
lowerCAmelCase_ : str = self.dummy_text_encoder
lowerCAmelCase_ : Union[str, Any] = self.dummy_tokenizer
lowerCAmelCase_ : int = self.dummy_transformer
lowerCAmelCase_ : List[str] = VQDiffusionScheduler(self.num_embed )
lowerCAmelCase_ : Union[str, Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=__lowercase )
lowerCAmelCase_ : Dict = VQDiffusionPipeline(
vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
lowerCAmelCase_ : int = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
lowerCAmelCase_ : Any = '''teddy bear playing in the pool'''
lowerCAmelCase_ : int = torch.Generator(device=__lowercase ).manual_seed(0 )
lowerCAmelCase_ : Tuple = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' )
lowerCAmelCase_ : Union[str, Any] = output.images
lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
lowerCAmelCase_ : List[Any] = pipe(
[prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0]
lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
lowerCAmelCase_ : Optional[int] = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def lowercase_ ( self ) -> List[str]:
        """Same CPU smoke test as above, but with *learnable* classifier-free
        sampling embeddings sized from the tokenizer's max length.

        NOTE(review): ``tokenizer.model_max_length`` reads an undefined name
        (the tokenizer was bound to ``lowerCAmelCase_``) — rename artifact.
        NOTE(review): the first tolerance below is 2.0 while the sibling test
        uses 1e-2 — looks inconsistent; confirm whether it was deliberately
        relaxed.
        """
        lowerCAmelCase_ : Optional[Any] = '''cpu'''
        lowerCAmelCase_ : str = self.dummy_vqvae
        lowerCAmelCase_ : Dict = self.dummy_text_encoder
        lowerCAmelCase_ : List[Any] = self.dummy_tokenizer
        lowerCAmelCase_ : Union[str, Any] = self.dummy_transformer
        lowerCAmelCase_ : Tuple = VQDiffusionScheduler(self.num_embed )
        lowerCAmelCase_ : str = LearnedClassifierFreeSamplingEmbeddings(
            learnable=__lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        lowerCAmelCase_ : List[str] = VQDiffusionPipeline(
            vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
        lowerCAmelCase_ : Union[str, Any] = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowerCAmelCase_ : Any = '''teddy bear playing in the pool'''
        # Generate once via the dict output and once via the tuple return path,
        # reseeding so both runs are deterministic and comparable.
        lowerCAmelCase_ : List[str] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Dict = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' )
        lowerCAmelCase_ : str = output.images
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = pipe(
            [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0]
        lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
        lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        lowerCAmelCase_ : Union[str, Any] = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case__( unittest.TestCase ):
    """Slow GPU integration test: runs the pretrained
    ``microsoft/vq-diffusion-ithq`` pipeline end-to-end and compares the output
    against a reference image hosted on the Hub.
    """

    def lowercase_ ( self ) -> Optional[int]:
        # clean up the VRAM after each test
        # NOTE(review): calls super().tearDown() but the method itself is not
        # named tearDown, so unittest will never invoke it automatically —
        # likely an automated-rename artifact.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase_ ( self ) -> int:
        """Generate one image and require max absolute deviation < 2.0 (uint8
        scale) from the stored reference .npy array."""
        lowerCAmelCase_ : Tuple = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
        lowerCAmelCase_ : str = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
        lowerCAmelCase_ : List[Any] = pipeline.to(__lowercase )
        pipeline.set_progress_bar_config(disable=__lowercase )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Optional[int] = pipeline(
            '''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=__lowercase , output_type='''np''' , )
        lowerCAmelCase_ : Union[str, Any] = output.images[0]
        assert image.shape == (2_5_6, 2_5_6, 3)
        assert np.abs(expected_image - image ).max() < 2.0
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class snake_case__:
    """Model tester for MaskFormer: builds a tiny Swin+DETR config with random
    inputs and provides shape/loss checks for ``MaskFormerModel`` and
    ``MaskFormerForInstanceSegmentation``.

    NOTE(review): ``__init__`` declares every parameter as ``__lowercase``
    (duplicate argument names are a SyntaxError) while the body reads
    ``parent``, ``batch_size`` etc. — automated-rename artifact.
    """

    def __init__( self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=1_0 , __lowercase=3 , __lowercase=3_2 * 4 , __lowercase=3_2 * 6 , __lowercase=4 , __lowercase=3_2 , ) -> Union[str, Any]:
        # Store test hyper-parameters on the instance for the helpers below.
        lowerCAmelCase_ : str = parent
        lowerCAmelCase_ : Optional[Any] = batch_size
        lowerCAmelCase_ : List[Any] = is_training
        lowerCAmelCase_ : Optional[Any] = use_auxiliary_loss
        lowerCAmelCase_ : List[Any] = num_queries
        lowerCAmelCase_ : str = num_channels
        lowerCAmelCase_ : Dict = min_size
        lowerCAmelCase_ : List[str] = max_size
        lowerCAmelCase_ : Any = num_labels
        lowerCAmelCase_ : str = mask_feature_size

    def lowercase_ ( self ) -> List[Any]:
        """Build random pixel values/mask and binary mask + class labels."""
        lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __lowercase )
        lowerCAmelCase_ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase )
        lowerCAmelCase_ : Union[str, Any] = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5
        ).float()
        lowerCAmelCase_ : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long()
        lowerCAmelCase_ : Dict = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def lowercase_ ( self ) -> List[str]:
        """Return a deliberately tiny MaskFormer config (1-layer Swin stages,
        2-head DETR decoder) to keep the tests fast."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    def lowercase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase_ : int = self.prepare_config_and_inputs()
        lowerCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict

    def lowercase_ ( self , __lowercase , __lowercase ) -> Any:
        """Check the number of hidden-state tuples returned for each stage.

        NOTE(review): ``assertTrue`` is called with two positional arguments —
        the second becomes the failure *message*, not a comparison; this was
        probably meant to be ``assertEqual``. Verify intent before changing.
        """
        lowerCAmelCase_ : Optional[int] = output.encoder_hidden_states
        lowerCAmelCase_ : List[Any] = output.pixel_decoder_hidden_states
        lowerCAmelCase_ : Optional[Any] = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowercase ) , config.decoder_config.decoder_layers )

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> int:
        """Run the bare MaskFormerModel and validate output shapes."""
        with torch.no_grad():
            lowerCAmelCase_ : List[Any] = MaskFormerModel(config=__lowercase )
            model.to(__lowercase )
            model.eval()
            lowerCAmelCase_ : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase )
            lowerCAmelCase_ : Optional[int] = model(__lowercase , output_hidden_states=__lowercase )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(__lowercase , __lowercase )

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
        """Run the instance-segmentation head with and without labels and
        validate logits shapes plus the scalar loss."""
        lowerCAmelCase_ : Any = MaskFormerForInstanceSegmentation(config=__lowercase )
        model.to(__lowercase )
        model.eval()

        def comm_check_on_output(__lowercase ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            lowerCAmelCase_ : int = model(pixel_values=__lowercase , pixel_mask=__lowercase )
            lowerCAmelCase_ : Any = model(__lowercase )
        comm_check_on_output(__lowercase )
        lowerCAmelCase_ : List[Any] = model(
            pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        comm_check_on_output(__lowercase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """Common model tests for MaskFormer (model + instance-segmentation head).

    NOTE(review): every class attribute below is named
    ``SCREAMING_SNAKE_CASE__`` — each assignment overwrites the previous one,
    so only the last value survives. The originals were presumably distinct
    attributes (all_model_classes, pipeline_model_mapping, test flags);
    automated-rename artifact, verify against upstream.
    """

    SCREAMING_SNAKE_CASE__ : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE__ : Tuple = (
        {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : Dict = False
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : List[str] = False

    def lowercase_ ( self ) -> List[Any]:
        # Set up the shared model tester and config tester.
        lowerCAmelCase_ : Any = MaskFormerModelTester(self )
        lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )

    def lowercase_ ( self ) -> Any:
        self.config_tester.run_common_tests()

    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )

    def lowercase_ ( self ) -> Optional[Any]:
        lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase )

    @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
    def lowercase_ ( self ) -> str:
        pass

    @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass

    @unittest.skip(reason='''MaskFormer is not a generative model''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass

    @unittest.skip(reason='''MaskFormer does not use token embeddings''' )
    def lowercase_ ( self ) -> Union[str, Any]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowercase_ ( self ) -> Dict:
        pass

    def lowercase_ ( self ) -> List[str]:
        """Verify the forward signature starts with ``pixel_values``."""
        lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : Tuple = model_class(__lowercase )
            lowerCAmelCase_ : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase_ : str = [*signature.parameters.keys()]
            lowerCAmelCase_ : Tuple = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowercase )

    @slow
    def lowercase_ ( self ) -> Optional[int]:
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            lowerCAmelCase_ : str = MaskFormerModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )

    def lowercase_ ( self ) -> List[Any]:
        """Train-style forward with labels must produce a loss."""
        lowerCAmelCase_ : Tuple = (self.model_tester.min_size,) * 2
        lowerCAmelCase_ : List[Any] = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=__lowercase ),
            '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowercase ),
            '''class_labels''': torch.zeros(2 , 1_0 , device=__lowercase ).long(),
        }
        lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase )
        lowerCAmelCase_ : Dict = model(**__lowercase )
        self.assertTrue(outputs.loss is not None )

    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )

    def lowercase_ ( self ) -> int:
        lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : List[str] = model_class(__lowercase ).to(__lowercase )
            lowerCAmelCase_ : int = model(**__lowercase , output_attentions=__lowercase )
            self.assertTrue(outputs.attentions is not None )

    def lowercase_ ( self ) -> List[str]:
        """Backprop smoke test for the segmentation head."""
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase_ : int = self.all_model_classes[1]
        lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ : Optional[Any] = model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        lowerCAmelCase_ : Optional[Any] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss
        loss.backward()

    def lowercase_ ( self ) -> Optional[int]:
        """Check gradients reach every intermediate hidden state / attention."""
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase_ : Any = self.all_model_classes[1]
        lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ : Tuple = True
        lowerCAmelCase_ : Tuple = True
        lowerCAmelCase_ : Any = model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        lowerCAmelCase_ : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        lowerCAmelCase_ : str = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        lowerCAmelCase_ : Union[str, Any] = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=__lowercase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Absolute tolerance used as `atol` in the torch.allclose checks of the
# integration tests below.
_UpperCAmelCase : Dict =1E-4
def lowerCAmelCase ( ):
    """Load the standard COCO cats test-fixture image used by the MaskFormer
    integration tests below.

    Returns:
        PIL.Image.Image: the opened fixture image.
    """
    # Bug fix: the opened image was previously bound to a throwaway local while
    # the undefined name ``image`` was returned, raising NameError at call time.
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_vision
@slow
class snake_case__( unittest.TestCase ):
    """Slow integration tests running pretrained MaskFormer checkpoints and
    comparing logits/hidden states against hard-coded reference slices.
    """

    @cached_property
    def lowercase_ ( self ) -> Union[str, Any]:
        # Image processor for the small-coco checkpoint (None without vision deps).
        return (
            MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            if is_vision_available()
            else None
        )

    def lowercase_ ( self ) -> Any:
        """Bare MaskFormerModel: check encoder / pixel-decoder / transformer-
        decoder last hidden states against reference slices."""
        lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase )
        lowerCAmelCase_ : Dict = self.default_image_processor
        lowerCAmelCase_ : int = prepare_img()
        lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : List[str] = model(**__lowercase )
        lowerCAmelCase_ : Union[str, Any] = torch.tensor(
            [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        lowerCAmelCase_ : List[Any] = torch.tensor(
            [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        lowerCAmelCase_ : int = torch.tensor(
            [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) )

    def lowercase_ ( self ) -> Dict:
        """Instance-segmentation head (Swin backbone): check mask and class
        queries logits against reference slices."""
        lowerCAmelCase_ : Optional[Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : Tuple = self.default_image_processor
        lowerCAmelCase_ : Optional[Any] = prepare_img()
        lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : Dict = model(**__lowercase )
        # masks_queries_logits
        lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCAmelCase_ : Tuple = [
            [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
            [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
            [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
        ]
        lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        # class_queries_logits
        lowerCAmelCase_ : List[Any] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ : Dict = torch.tensor(
            [
                [1.6_512e00, -5.2_572e00, -3.3_519e00],
                [3.6_169e-02, -5.9_025e00, -2.9_313e00],
                [1.0_766e-04, -7.7_630e00, -5.1_263e00],
            ] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )

    def lowercase_ ( self ) -> Optional[Any]:
        """Instance-segmentation head (ResNet-101 backbone, COCO-stuff): same
        logits checks for that checkpoint."""
        lowerCAmelCase_ : str = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : int = self.default_image_processor
        lowerCAmelCase_ : Optional[Any] = prepare_img()
        lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : str = model(**__lowercase )
        # masks_queries_logits
        lowerCAmelCase_ : List[str] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
        lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        # class_queries_logits
        lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ : int = torch.tensor(
            [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )

    def lowercase_ ( self ) -> Optional[Any]:
        """Batched forward with resized segmentation maps must yield a loss."""
        lowerCAmelCase_ : Dict = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : str = self.default_image_processor
        lowerCAmelCase_ : Union[str, Any] = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
        lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase )
        lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']]
        lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']]
        with torch.no_grad():
            lowerCAmelCase_ : str = model(**__lowercase )
        self.assertTrue(outputs.loss is not None )
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
_UpperCAmelCase : Dict =None
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
# Names of the serialized vocabulary files expected next to a checkpoint.
_UpperCAmelCase : Any ={"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
# Download URLs for the pretrained XLNet vocab / tokenizer files.
_UpperCAmelCase : Any ={
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
    },
}
# Max input sizes per checkpoint (None = no fixed positional limit).
_UpperCAmelCase : Dict ={
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}
# SentencePiece underline marker used to denote word boundaries.
_UpperCAmelCase : Tuple ="""▁"""
# Segments (not really needed)
_UpperCAmelCase : str =0
_UpperCAmelCase : List[str] =1
_UpperCAmelCase : int =2
_UpperCAmelCase : Any =3
_UpperCAmelCase : List[Any] =4
class snake_case__( UpperCAmelCase__ ):
    """Fast XLNet tokenizer backed by HF *tokenizers*; pads on the left and
    builds XLNet-style ``A <sep> B <sep> <cls>`` inputs.

    NOTE(review): several class attributes share the name
    ``SCREAMING_SNAKE_CASE__`` (later assignments overwrite earlier ones), and
    methods declare duplicate ``__lowercase`` parameters, which is a
    SyntaxError — automated-rename artifacts; verify against upstream.
    """

    SCREAMING_SNAKE_CASE__ : int = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ : Any = """left"""
    SCREAMING_SNAKE_CASE__ : List[Any] = XLNetTokenizer

    def __init__( self , __lowercase=None , __lowercase=None , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<sep>" , __lowercase="<pad>" , __lowercase="<cls>" , __lowercase="<mask>" , __lowercase=["<eop>", "<eod>"] , **__lowercase , ) -> List[Any]:
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase_ : Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
        super().__init__(
            vocab_file=__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , **__lowercase , )
        # Token-type id used for the <cls> segment in XLNet.
        lowerCAmelCase_ : List[Any] = 3
        lowerCAmelCase_ : Dict = do_lower_case
        lowerCAmelCase_ : Dict = remove_space
        lowerCAmelCase_ : List[str] = keep_accents
        lowerCAmelCase_ : List[str] = vocab_file
        # The slow vocabulary can only be re-saved when a vocab file was given.
        lowerCAmelCase_ : str = False if not self.vocab_file else True

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        """Build model inputs: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        lowerCAmelCase_ : Tuple = [self.sep_token_id]
        lowerCAmelCase_ : Any = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        """Build token-type ids: 0s for sequence A, 1s for B, 2 for <cls>."""
        lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCAmelCase_ : List[Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
        """Copy the SentencePiece model into ``save_directory`` and return its
        path; raises when no slow-tokenizer vocab is available."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(__lowercase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase_ : str = os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
            copyfile(self.vocab_file , __lowercase )
        return (out_vocab_file,)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for the GPTSAN-japanese configuration.
_UpperCAmelCase : Union[str, Any] =logging.get_logger(__name__)
# Map of pretrained checkpoint names to their hosted config.json URLs.
_UpperCAmelCase : Dict ={
    """tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
        """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
    ),
}
class snake_case__( UpperCAmelCase__ ):
    """Configuration for the GPTSAN-japanese model (switch + extra layers,
    mixture-of-experts router settings).

    NOTE(review): the class attributes share the name
    ``SCREAMING_SNAKE_CASE__`` (each overwrites the previous), and ``__init__``
    declares every parameter as ``__lowercase`` — duplicate argument names are
    a SyntaxError; the body reads the original names (``vocab_size`` etc.).
    Automated-rename artifact; verify against upstream before use.
    """

    SCREAMING_SNAKE_CASE__ : Any = """gptsan-japanese"""
    SCREAMING_SNAKE_CASE__ : str = [
        """past_key_values""",
    ]
    SCREAMING_SNAKE_CASE__ : str = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__( self , __lowercase=3_6_0_0_0 , __lowercase=1_2_8_0 , __lowercase=1_0_2_4 , __lowercase=8_1_9_2 , __lowercase=4_0_9_6 , __lowercase=1_2_8 , __lowercase=1_0 , __lowercase=0 , __lowercase=1_6 , __lowercase=1_6 , __lowercase=1_2_8 , __lowercase=0.0 , __lowercase=1e-5 , __lowercase=False , __lowercase=0.0 , __lowercase="float32" , __lowercase=False , __lowercase=False , __lowercase=False , __lowercase=0.0_02 , __lowercase=False , __lowercase=True , __lowercase=3_5_9_9_8 , __lowercase=3_5_9_9_5 , __lowercase=3_5_9_9_9 , **__lowercase , ) -> Tuple:
        # Model dimensions.
        lowerCAmelCase_ : str = vocab_size
        lowerCAmelCase_ : str = max_position_embeddings
        lowerCAmelCase_ : Optional[int] = d_model
        lowerCAmelCase_ : Any = d_ff
        lowerCAmelCase_ : Tuple = d_ext
        lowerCAmelCase_ : List[str] = d_spout
        # Layer counts: switch (MoE) layers plus plain "ext" layers.
        lowerCAmelCase_ : Optional[Any] = num_switch_layers
        lowerCAmelCase_ : Tuple = num_ext_layers
        lowerCAmelCase_ : List[Any] = num_switch_layers + num_ext_layers
        lowerCAmelCase_ : int = num_heads
        # Mixture-of-experts router settings.
        lowerCAmelCase_ : str = num_experts
        lowerCAmelCase_ : Optional[int] = expert_capacity
        lowerCAmelCase_ : int = dropout_rate
        lowerCAmelCase_ : Any = layer_norm_epsilon
        lowerCAmelCase_ : Dict = router_bias
        lowerCAmelCase_ : Optional[int] = router_jitter_noise
        lowerCAmelCase_ : Tuple = router_dtype
        lowerCAmelCase_ : Union[str, Any] = router_ignore_padding_tokens
        # Output / generation flags.
        lowerCAmelCase_ : Tuple = output_hidden_states
        lowerCAmelCase_ : Optional[int] = output_attentions
        lowerCAmelCase_ : int = initializer_factor
        lowerCAmelCase_ : Any = output_router_logits
        lowerCAmelCase_ : int = use_cache
        super().__init__(
            separator_token_id=__lowercase , pad_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase , )
import math
import qiskit
def lowerCAmelCase ( input_a = 1 , input_b = 1 , carry_in = 1 )-> qiskit.result.counts.Counts:
    """Simulate a quantum full adder and return the measurement counts.

    Each input may be 0, 1 or 2: 0/1 set the corresponding qubit classically,
    while 2 puts it into superposition via a Hadamard gate. The circuit
    measures the (sum, carry-out) qubits over 1000 shots.

    Args:
        input_a: first addend qubit value (0, 1 or 2).
        input_b: second addend qubit value (0, 1 or 2).
        carry_in: carry-in qubit value (0, 1 or 2).

    Returns:
        qiskit.result.counts.Counts: counts of the two measured qubits.

    Raises:
        TypeError: if any input is a string.
        ValueError: if any input is negative, non-integral, or greater than 2.

    Bug fixes vs the previous revision: the signature declared three
    parameters all named ``lowerCAmelCase_`` (a SyntaxError); the type check
    called ``isinstance(x, x)`` instead of testing against ``str``; and the
    body referenced undefined names (``entry``, ``quantum_circuit``, ``job``)
    while applying gates to the wrong arguments.
    """
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('''inputs must be integers.''' )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''' )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers: 4 qubits (a, b, carry-in, ancilla), 2 classical bits
    quantum_register = qiskit.QuantumRegister(4 , '''qr''' )
    classical_register = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(quantum_register , classical_register )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , classical_register )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
    # Bug fix: the previous revision called ``quantum_full_adder``, a name not
    # defined in this module (the function above is ``lowerCAmelCase``),
    # which raised NameError when run as a script.
    print(f"""Total sum count for state is: {lowerCAmelCase(1, 1, 1)}""")
# Accepted call parameters for text-to-image pipelines.
_UpperCAmelCase : int =frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
# Parameters that are batched (one entry per prompt) for text-to-image.
_UpperCAmelCase : List[Any] =frozenset(["""prompt""", """negative_prompt"""])
# Unconditional image generation takes no call parameters.
_UpperCAmelCase : Dict =frozenset([])
_UpperCAmelCase : int =frozenset(["""image"""])
# Image-variation pipelines.
_UpperCAmelCase : Tuple =frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : int =frozenset(["""image"""])
# Text-guided image-to-image pipelines.
_UpperCAmelCase : str =frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : int =frozenset(["""prompt""", """image""", """negative_prompt"""])
_UpperCAmelCase : Optional[int] =frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : Optional[int] =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_UpperCAmelCase : Optional[Any] =frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Optional[Any] =frozenset(["""image""", """mask_image"""])
# Example-guided inpainting (e.g. paint-by-example).
_UpperCAmelCase : Union[str, Any] =frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""example_image""", """image""", """mask_image"""])
# Class-conditional generation.
_UpperCAmelCase : Any =frozenset(["""class_labels"""])
_UpperCAmelCase : List[Any] =frozenset(["""class_labels"""])
# Unconditional generation driven only by batch size.
_UpperCAmelCase : int =frozenset(["""batch_size"""])
_UpperCAmelCase : str =frozenset([])
_UpperCAmelCase : str =frozenset(["""batch_size"""])
_UpperCAmelCase : Optional[Any] =frozenset([])
# Text-to-audio pipelines.
_UpperCAmelCase : Tuple =frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""prompt""", """negative_prompt"""])
# Token-driven audio generation.
_UpperCAmelCase : List[str] =frozenset(["""input_tokens"""])
_UpperCAmelCase : Optional[Any] =frozenset(["""input_tokens"""])
import re
def lowerCAmelCase ( phone )-> bool:
    """Validate an Indian mobile phone number.

    Accepts an optional ``+91`` prefix (with optional hyphen/space), an
    optional leading ``0`` or ``91``, followed by a 10-digit number starting
    with 7, 8 or 9.

    Args:
        phone: the phone-number string to validate.

    Returns:
        bool: True if the string matches the expected format.

    Bug fix vs the previous revision: the parameter and the compiled pattern
    were both named ``lowerCAmelCase_``, so ``re.search`` received the pattern
    twice and the comparison read an undefined name ``phone``.
    """
    pat = re.compile(r'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
if __name__ == "__main__":
    # Bug fix: the previous revision called ``indian_phone_validator``, a name
    # not defined in this module (the validator above is ``lowerCAmelCase``),
    # which raised NameError when run as a script.
    print(lowerCAmelCase("""+918827897895"""))
from __future__ import annotations
def lowerCAmelCase ( principal , daily_interest_rate , days_between_payments )-> float:
    """Return the simple interest accrued between two payments.

    Args:
        principal: amount the interest is computed on; must be > 0.
        daily_interest_rate: per-day interest rate; must be >= 0.
        days_between_payments: number of days accrued; must be > 0.

    Returns:
        float: ``principal * daily_interest_rate * days_between_payments``.

    Raises:
        ValueError: if any argument violates its constraint.

    Bug fix vs the previous revision: all three parameters were named
    ``lowerCAmelCase_`` (duplicate argument names are a SyntaxError) while the
    body read the undefined names used here.
    """
    if days_between_payments <= 0:
        raise ValueError('''days_between_payments must be > 0''' )
    if daily_interest_rate < 0:
        raise ValueError('''daily_interest_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * daily_interest_rate * days_between_payments
def lowerCAmelCase ( principal , nominal_annual_interest_rate_percentage , number_of_compounding_periods , )-> float:
    """Return the compound interest accrued on ``principal``.

    Bug fix: the original declared all three parameters with the same name
    (a SyntaxError); the signature is reconstructed from the names the body
    references.

    NOTE(review): despite the name, ``nominal_annual_interest_rate_percentage``
    is used directly as the per-period rate (no /100, no /periods) — kept
    as the original formula computes it.

    :raises ValueError: if any argument is out of range.
    """
    if number_of_compounding_periods <= 0:
        raise ValueError('''number_of_compounding_periods must be > 0''' )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    # Interest only (growth factor minus the principal itself).
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def lowerCAmelCase ( principal , nominal_annual_percentage_rate , number_of_years , )-> float:
    """Return the interest accrued on ``principal`` at an annual percentage
    rate, compounded daily over ``number_of_years``.

    Bug fixes: the original declared all three parameters with one shared
    name (a SyntaxError) and delegated to the undefined name
    ``compound_interest``; the equivalent compound-interest formula is
    inlined here with a daily rate over ``number_of_years * 365`` periods.

    :raises ValueError: if any argument is out of range.
    """
    if number_of_years <= 0:
        raise ValueError('''number_of_years must be > 0''' )
    if nominal_annual_percentage_rate < 0:
        raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    daily_rate = nominal_annual_percentage_rate / 365
    number_of_periods = number_of_years * 365
    return principal * ((1 + daily_rate) ** number_of_periods - 1)
if __name__ == "__main__":
    # Run the module's doctests (if any) when executed as a script.
    import doctest
    doctest.testmod()
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
# Module-level logger for this image-processor module.
_UpperCAmelCase : Any =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
    r"""
    Image processor that resizes images (with an optional ConvNeXt-style
    ``crop_pct`` center crop for sizes below 384), rescales pixel values and
    normalizes them.

    NOTE(review): the obfuscated original declared every parameter with the
    single name ``__lowercase`` (duplicate parameter names — a SyntaxError)
    and named every method ``lowercase_`` so each definition shadowed the
    previous one, breaking the ``self.resize``/``self.rescale``/
    ``self.normalize`` calls in ``preprocess``.  Signatures and method names
    are reconstructed from the attribute names the bodies reference.
    """

    SCREAMING_SNAKE_CASE__ : Dict = ["""pixel_values"""]

    def __init__(
        self,
        do_resize = True,
        size = None,
        crop_pct = None,
        resample = PILImageResampling.BILINEAR,
        do_rescale = True,
        rescale_factor = 1 / 2_5_5,
        do_normalize = True,
        image_mean = None,
        image_std = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 3_8_4}
        size = get_size_dict(size , default_to_square=False )
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image,
        size,
        crop_pct,
        resample = PILImageResampling.BICUBIC,
        data_format = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image``.  Below 384 px the shortest edge is first scaled
        to ``shortest_edge / crop_pct`` keeping aspect ratio, then center
        cropped; at 384 or larger the image is warped to a square."""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
        shortest_edge = size['''shortest_edge''']
        if shortest_edge < 3_8_4:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct )
            resize_size = get_resize_output_image_size(image , size=resize_shortest_edge , default_to_square=False )
            image = resize(image=image , size=resize_size , resample=resample , data_format=data_format , **kwargs )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image , size=(shortest_edge, shortest_edge) , data_format=data_format , **kwargs )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image , size=(shortest_edge, shortest_edge) , resample=resample , data_format=data_format , **kwargs )

    def rescale(self, image, scale, data_format = None, **kwargs) -> Any:
        """Rescale pixel values of ``image`` by ``scale``."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize(self, image, mean, std, data_format = None, **kwargs) -> np.ndarray:
        """Normalize ``image`` with per-channel ``mean`` and ``std``."""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess(
        self,
        images,
        do_resize = None,
        size = None,
        crop_pct = None,
        resample = None,
        do_rescale = None,
        rescale_factor = None,
        do_normalize = None,
        image_mean = None,
        image_std = None,
        return_tensors = None,
        data_format = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess a batch of images; each argument defaults to the value
        configured on the instance."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # Bug fix: parenthesized — `and` binds tighter than `or`, so the
        # original raised whenever `resample` was None even with do_resize=False.
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
            raise ValueError('''crop_pct must be specified if size < 384.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
_UpperCAmelCase =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
    r"""Configuration that composes an encoder config and a decoder config
    (encoder-decoder model configuration).

    NOTE(review): reconstructed from obfuscated code whose local names were
    rewritten inconsistently — the body referenced ``kwargs``,
    ``encoder_config``, ``decoder_config`` and ``output`` with no matching
    assignments, the classmethod declared duplicate parameter names
    (a SyntaxError), and both methods shared one name so the second shadowed
    the first; the serializing method is restored as ``to_dict`` since its
    body implements that contract.
    """

    SCREAMING_SNAKE_CASE__ : Optional[int] = """encoder-decoder"""
    SCREAMING_SNAKE_CASE__ : str = True

    def __init__( self , **kwargs ) -> None:
        """Build encoder/decoder sub-configs from the ``encoder``/``decoder``
        dicts in ``kwargs`` via ``AutoConfig.for_model``."""
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        # Imported locally to avoid a circular import with the auto mapping.
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True

    @classmethod
    def lowercase_ ( cls , encoder_config , decoder_config , **kwargs ) -> PretrainedConfig:
        """Instantiate from two existing configs, marking the decoder as a
        cross-attending decoder."""
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )

    def to_dict( self ) -> Any:
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__ )
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger and the map from checkpoint name to its hosted config URL.
_UpperCAmelCase : Optional[int] =logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] ={
    """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
    r"""Configuration for GPT-NeoX-Japanese models.

    NOTE(review): the obfuscated original declared every ``__init__``
    parameter with the single name ``__lowercase`` (duplicate parameter
    names — a SyntaxError); the parameter names below are reconstructed from
    the attribute names the body assigns and from the arguments forwarded to
    ``super().__init__`` — the default values are kept in the original order.
    """

    SCREAMING_SNAKE_CASE__ : str = """gpt_neox_japanese"""

    def __init__(
        self,
        vocab_size = 3_2_0_0_0,
        hidden_size = 2_5_6_0,
        num_hidden_layers = 3_2,
        num_attention_heads = 3_2,
        intermediate_multiple_size = 4,
        hidden_act = "gelu",
        rotary_pct = 1.00,
        rotary_emb_base = 1_0_0_0_0,
        max_position_embeddings = 2_0_4_8,
        initializer_range = 0.02,
        layer_norm_eps = 1e-5,
        use_cache = True,
        bos_token_id = 3_1_9_9_6,
        eos_token_id = 3_1_9_9_9,
        attention_dropout = 0.1,
        hidden_dropout = 0.0,
        **kwargs,
    ) -> None:
        # BOS/EOS ids are handled by the base config class.
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Lazy-import boilerplate: expose the processor eagerly only for type
# checkers; at runtime replace this module with a _LazyModule proxy.
_UpperCAmelCase : Tuple ={"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
    from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
    import sys
    # NOTE(review): _LazyModule expects the structure dict declared above,
    # but the obfuscation renamed it to `_UpperCAmelCase` while this call
    # still references `_import_structure` — presumably the original name.
    _UpperCAmelCase : List[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class snake_case__:
    """Builds a tiny MaskFormer config plus random inputs for the unit tests.

    NOTE(review): reconstructed — the obfuscated original declared every
    parameter as ``__lowercase`` (duplicate names, a SyntaxError), gave all
    methods the single name ``lowercase_`` (so ``self.get_config()`` etc.
    resolved nowhere), and replaced ``torch_device`` references with
    ``__lowercase``.  Names restored from the call sites in this file.
    """

    def __init__(
        self,
        parent,
        batch_size = 2,
        is_training = True,
        use_auxiliary_loss = False,
        num_queries = 1_0,
        num_channels = 3,
        min_size = 3_2 * 4,
        max_size = 3_2 * 6,
        num_labels = 4,
        mask_feature_size = 3_2,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs( self ) -> List[Any]:
        """Random pixel inputs, masks and labels on the current test device."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config( self ) -> Any:
        """A deliberately tiny Swin backbone + DETR decoder MaskFormer config."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    def prepare_config_and_inputs_for_common( self ) -> Any:
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state( self , output , config ) -> Any:
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        # NOTE(review): assertTrue(x, msg) only checks the first argument's
        # truthiness — kept as in the upstream test code.
        self.parent.assertTrue(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(transformer_decoder_hidden_states ) , config.decoder_config.decoder_layers )

    def create_and_check_maskformer_model( self , config , pixel_values , pixel_mask , output_hidden_states=False ) -> None:
        with torch.no_grad():
            model = MaskFormerModel(config=config )
            model.to(torch_device )
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            output = model(pixel_values , output_hidden_states=True )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(output , config )

    def create_and_check_maskformer_instance_segmentation_head_model( self , config , pixel_values , pixel_mask , mask_labels , class_labels ) -> None:
        model = MaskFormerForInstanceSegmentation(config=config )
        model.to(torch_device )
        model.eval()

        def comm_check_on_output(result ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            result = model(pixel_values )
            comm_check_on_output(result )
            result = model(
                pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
        comm_check_on_output(result )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """Common-model-test suite for MaskFormer (ModelTesterMixin + pipeline mixin).

    NOTE(review): this obfuscated class references the undefined names
    `MaskFormerModelTester` (the tester class above was renamed to
    `snake_case__`) and `__lowercase` (presumably `torch_device` or a boolean
    flag pre-obfuscation) — confirm against upstream before running.
    """
    SCREAMING_SNAKE_CASE__ : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE__ : Tuple = (
        {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : Dict = False
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : List[str] = False
    def lowercase_ ( self ) -> List[Any]:
        # Build the shared model tester and config tester for this suite.
        lowerCAmelCase_ : Any = MaskFormerModelTester(self )
        lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
    def lowercase_ ( self ) -> Any:
        self.config_tester.run_common_tests()
    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )
    def lowercase_ ( self ) -> Optional[Any]:
        lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase )
    @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
    def lowercase_ ( self ) -> str:
        pass
    @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass
    @unittest.skip(reason='''MaskFormer is not a generative model''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass
    @unittest.skip(reason='''MaskFormer does not use token embeddings''' )
    def lowercase_ ( self ) -> Union[str, Any]:
        pass
    @require_torch_multi_gpu
    @unittest.skip(
        reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowercase_ ( self ) -> Dict:
        pass
    def lowercase_ ( self ) -> List[str]:
        # The forward signature's first argument (after self) must be pixel_values.
        lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : Tuple = model_class(__lowercase )
            lowerCAmelCase_ : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase_ : str = [*signature.parameters.keys()]
            lowerCAmelCase_ : Tuple = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowercase )
    @slow
    def lowercase_ ( self ) -> Optional[int]:
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            lowerCAmelCase_ : str = MaskFormerModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )
    def lowercase_ ( self ) -> List[Any]:
        # Training with random labels should still produce a loss.
        lowerCAmelCase_ : Tuple = (self.model_tester.min_size,) * 2
        lowerCAmelCase_ : List[Any] = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=__lowercase ),
            '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowercase ),
            '''class_labels''': torch.zeros(2 , 1_0 , device=__lowercase ).long(),
        }
        lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase )
        lowerCAmelCase_ : Dict = model(**__lowercase )
        self.assertTrue(outputs.loss is not None )
    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )
    def lowercase_ ( self ) -> int:
        lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : List[str] = model_class(__lowercase ).to(__lowercase )
            lowerCAmelCase_ : int = model(**__lowercase , output_attentions=__lowercase )
            self.assertTrue(outputs.attentions is not None )
    def lowercase_ ( self ) -> List[str]:
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase_ : int = self.all_model_classes[1]
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ : Optional[Any] = model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        lowerCAmelCase_ : Optional[Any] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss
        loss.backward()
    def lowercase_ ( self ) -> Optional[int]:
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase_ : Any = self.all_model_classes[1]
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ : Tuple = True
        lowerCAmelCase_ : Tuple = True
        lowerCAmelCase_ : Any = model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        lowerCAmelCase_ : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        lowerCAmelCase_ : str = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        lowerCAmelCase_ : Union[str, Any] = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=__lowercase )
        # Gradients must reach every intermediate representation.
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Absolute tolerance used by the integration tests below for tensor comparisons.
_UpperCAmelCase : Dict =1E-4
def lowerCAmelCase ( )-> Any:
    """Load the standard COCO cats fixture image used by the integration tests."""
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_vision
@slow
class snake_case__( unittest.TestCase ):
    """Slow integration tests comparing MaskFormer outputs against recorded values.

    NOTE(review): `__lowercase` appears throughout where the pre-obfuscation
    code presumably used `torch_device` (and, for `atol=`, the module-level
    tolerance) — confirm against upstream before running.
    """
    @cached_property
    def lowercase_ ( self ) -> Union[str, Any]:
        # Image processor fixture; None when vision deps are unavailable.
        return (
            MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            if is_vision_available()
            else None
        )
    def lowercase_ ( self ) -> Any:
        # Base model: check encoder / pixel-decoder / transformer-decoder slices.
        lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase )
        lowerCAmelCase_ : Dict = self.default_image_processor
        lowerCAmelCase_ : int = prepare_img()
        lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : List[str] = model(**__lowercase )
        lowerCAmelCase_ : Union[str, Any] = torch.tensor(
            [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        lowerCAmelCase_ : List[Any] = torch.tensor(
            [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        lowerCAmelCase_ : int = torch.tensor(
            [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) )
    def lowercase_ ( self ) -> Dict:
        # Instance-segmentation head (Swin backbone): check mask/class logits.
        lowerCAmelCase_ : Optional[Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : Tuple = self.default_image_processor
        lowerCAmelCase_ : Optional[Any] = prepare_img()
        lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : Dict = model(**__lowercase )
        # masks_queries_logits
        lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCAmelCase_ : Tuple = [
            [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
            [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
            [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
        ]
        lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        # class_queries_logits
        lowerCAmelCase_ : List[Any] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ : Dict = torch.tensor(
            [
                [1.6_512e00, -5.2_572e00, -3.3_519e00],
                [3.6_169e-02, -5.9_025e00, -2.9_313e00],
                [1.0_766e-04, -7.7_630e00, -5.1_263e00],
            ] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
    def lowercase_ ( self ) -> Optional[Any]:
        # Instance-segmentation head (ResNet-101 backbone, COCO-stuff checkpoint).
        lowerCAmelCase_ : str = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : int = self.default_image_processor
        lowerCAmelCase_ : Optional[Any] = prepare_img()
        lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : str = model(**__lowercase )
        # masks_queries_logits
        lowerCAmelCase_ : List[str] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
        lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        # class_queries_logits
        lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ : int = torch.tensor(
            [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
    def lowercase_ ( self ) -> Optional[Any]:
        # End-to-end: processor-produced mask/class labels must yield a loss.
        lowerCAmelCase_ : Dict = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : str = self.default_image_processor
        lowerCAmelCase_ : Union[str, Any] = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
        lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase )
        lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']]
        lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']]
        with torch.no_grad():
            lowerCAmelCase_ : str = model(**__lowercase )
        self.assertTrue(outputs.loss is not None )
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
# Module-level logger for the HANS dataset helpers below.
_UpperCAmelCase : Any =logging.getLogger(__name__)
@dataclass(frozen=UpperCAmelCase__ )
class snake_case__:
    """A single HANS training/test example.

    NOTE(review): the obfuscation collapsed all five field names to one, so
    this dataclass now has a single field instead of the presumed original
    guid/text_a/text_b/label/pairID quintet — confirm against upstream.
    """
    SCREAMING_SNAKE_CASE__ : str
    SCREAMING_SNAKE_CASE__ : str
    SCREAMING_SNAKE_CASE__ : Optional[str] = None
    SCREAMING_SNAKE_CASE__ : Optional[str] = None
    SCREAMING_SNAKE_CASE__ : Optional[str] = None
@dataclass(frozen=UpperCAmelCase__ )
class snake_case__:
    """Tokenized features for one HANS example.

    NOTE(review): as above, the obfuscation collapsed all field names to one
    (presumably input_ids/attention_mask/token_type_ids/label/pairID) —
    confirm against upstream before use.
    """
    SCREAMING_SNAKE_CASE__ : List[int]
    SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None
    SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None
    SCREAMING_SNAKE_CASE__ : Optional[Union[int, float]] = None
    SCREAMING_SNAKE_CASE__ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class snake_case__( UpperCAmelCase__ ):
    """PyTorch Dataset over cached HANS features.

    NOTE(review): `__init__` below declares duplicate `__lowercase`
    parameter names — a SyntaxError as written — and the body references
    names (`data_dir`, `tokenizer`, `task`, `evaluate`, `overwrite_cache`,
    `cached_features_file`, `max_seq_length`) with no matching binding;
    the original signature must be reconstructed before this can run.
    """
    SCREAMING_SNAKE_CASE__ : List[InputFeatures]
    def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase=False , __lowercase = False , ) -> Tuple:
        lowerCAmelCase_ : Optional[int] = hans_processors[task]()
        lowerCAmelCase_ : str = os.path.join(
            __lowercase , '''cached_{}_{}_{}_{}'''.format(
                '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(__lowercase ) , __lowercase , ) , )
        lowerCAmelCase_ : Any = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            # NOTE(review): a swap assignment collapsed to one target by the
            # obfuscation — originally presumably `l[1], l[2] = l[2], l[1]`.
            lowerCAmelCase_ : Union[str, Any] = label_list[2], label_list[1]
        lowerCAmelCase_ : Union[str, Any] = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lowerCAmelCase_ : Dict = cached_features_file + '''.lock'''
        with FileLock(__lowercase ):
            if os.path.exists(__lowercase ) and not overwrite_cache:
                logger.info(f"""Loading features from cached file {cached_features_file}""" )
                lowerCAmelCase_ : Optional[int] = torch.load(__lowercase )
            else:
                logger.info(f"""Creating features from dataset file at {data_dir}""" )
                lowerCAmelCase_ : Optional[int] = (
                    processor.get_dev_examples(__lowercase ) if evaluate else processor.get_train_examples(__lowercase )
                )
                logger.info('''Training examples: %s''' , len(__lowercase ) )
                lowerCAmelCase_ : Any = hans_convert_examples_to_features(__lowercase , __lowercase , __lowercase , __lowercase )
                logger.info('''Saving features into cached file %s''' , __lowercase )
                torch.save(self.features , __lowercase )
    def __len__( self ) -> List[str]:
        # Number of featurized examples.
        return len(self.features )
    def __getitem__( self , __lowercase ) -> InputFeatures:
        # Index-based access used by torch DataLoader.
        return self.features[i]
    def lowercase_ ( self ) -> List[str]:
        return self.label_list
if is_tf_available():
import tensorflow as tf
class snake_case__:
    """TensorFlow dataset wrapper over HANS features (via a generator).

    NOTE(review): `__init__` below declares duplicate `__lowercase`
    parameter names — a SyntaxError as written — and references unbound
    names (`task`, `evaluate`, etc.); reconstruct the signature from the
    body before running.
    """
    SCREAMING_SNAKE_CASE__ : List[InputFeatures]
    def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase = 1_2_8 , __lowercase=False , __lowercase = False , ) -> Optional[Any]:
        lowerCAmelCase_ : Dict = hans_processors[task]()
        lowerCAmelCase_ : int = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            lowerCAmelCase_ : Tuple = label_list[2], label_list[1]
        lowerCAmelCase_ : Dict = label_list
        lowerCAmelCase_ : List[str] = processor.get_dev_examples(__lowercase ) if evaluate else processor.get_train_examples(__lowercase )
        lowerCAmelCase_ : str = hans_convert_examples_to_features(__lowercase , __lowercase , __lowercase , __lowercase )
        def gen():
            # Stream features one example at a time for tf.data.
            for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
                if ex_index % 1_0_0_0_0 == 0:
                    logger.info('''Writing example %d of %d''' % (ex_index, len(__lowercase )) )
                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )
        # Build the tf.data.Dataset with explicit dtypes and shapes.
        lowerCAmelCase_ : Tuple = tf.data.Dataset.from_generator(
            gen , (
                {
                    '''example_id''': tf.intaa,
                    '''input_ids''': tf.intaa,
                    '''attention_mask''': tf.intaa,
                    '''token_type_ids''': tf.intaa,
                },
                tf.intaa,
            ) , (
                {
                    '''example_id''': tf.TensorShape([] ),
                    '''input_ids''': tf.TensorShape([None, None] ),
                    '''attention_mask''': tf.TensorShape([None, None] ),
                    '''token_type_ids''': tf.TensorShape([None, None] ),
                },
                tf.TensorShape([] ),
            ) , )
    def lowercase_ ( self ) -> Optional[int]:
        return self.dataset
    def __len__( self ) -> str:
        return len(self.features )
    def __getitem__( self , __lowercase ) -> InputFeatures:
        return self.features[i]
    def lowercase_ ( self ) -> Any:
        return self.label_list
class snake_case__(UpperCAmelCase__):
    """Processor for the HANS data set.

    NOTE(review): method names restored to ``get_train_examples`` /
    ``get_dev_examples`` / ``get_labels`` / ``_create_examples`` — the dataset
    constructors in this file call them under these names, while the corrupted
    original defined four methods that all shadowed one another as
    ``lowercase_``.
    """

    def get_train_examples(self, data_dir) -> Optional[Any]:
        """Read the HANS training split from ``data_dir``."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir) -> Any:
        """Read the HANS evaluation split from ``data_dir``."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self) -> int:
        """The three NLI labels used by HANS."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type) -> List[str]:
        """Build ``InputExample`` objects from raw TSV rows (row 0 is the header)."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            # Pair IDs are sometimes prefixed with "ex"; strip that prefix.
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer) -> Any:
    """Tokenize ``examples`` and wrap them as ``InputFeatures``.

    NOTE(review): the original defined this as ``lowerCAmelCase`` while every
    call site in this file uses ``hans_convert_examples_to_features``, and it
    bound ``label_map`` / ``features`` / ``inputs`` to throwaway names that
    later lines read back (NameError). The real name is restored and the old
    one kept as an alias.
    """
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        # Examples with an unknown label fall back to class 0.
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


# Backward-compatible alias for the corrupted original name.
lowerCAmelCase = hans_convert_examples_to_features
# Task name -> number of labels for the HANS benchmark.
hans_tasks_num_labels = {
    "hans": 3,
}

# Task name -> processor class; ``hans_processors[task]()`` is read by the
# dataset constructors above. NOTE(review): the original bound both dicts to
# the throwaway name ``_UpperCAmelCase`` while readers use the names here.
# ``HansProcessor`` is presumably the processor class defined above (named
# ``snake_case__`` in this corrupted copy) — TODO confirm.
hans_processors = {
    "hans": HansProcessor,
}

# Keep the original throwaway binding alive for any code that still reads it.
_UpperCAmelCase = hans_processors
| 708 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class snake_case__(TaskTemplate):
    """Task template for automatic speech recognition.

    NOTE(review): reconstructed — the corrupted original inherited from the
    undefined ``UpperCAmelCase__``, passed ``frozen=UpperCAmelCase__`` and
    declared every field as ``SCREAMING_SNAKE_CASE__``, while the method below
    reads ``self.audio_column`` / ``self.input_schema`` /
    ``self.transcription_column``. ``TaskTemplate`` is the only otherwise
    unused import, so it is taken as the base class.
    """

    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def lowercase_(self, features) -> "snake_case__":
        """Return a copy of this template whose input schema is aligned with ``features``."""
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # Write through __dict__ because the dataclass is frozen.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def lowercase_(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. NOTE(review): immediately clobbered by the next assignment —
# both bindings share the throwaway name ``_UpperCAmelCase``.
_UpperCAmelCase : Tuple =logging.get_logger(__name__)

# Map of pretrained MRA checkpoint name -> hosted config.json URL.
_UpperCAmelCase : Optional[Any] ={
    """uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class snake_case__(UpperCAmelCase__):
    """Configuration class for the MRA model (uw-madison/mra-base-512-4 style).

    NOTE(review): reconstructed — the corrupted original declared twenty
    identically named ``__lowercase`` parameters (a SyntaxError) and bound
    every attribute to a throwaway local instead of ``self``. Parameter names
    and their order are recovered from the attribute assignments; defaults are
    the ones from the corrupted signature, in order.
    """

    # PretrainedConfig machinery reads ``model_type``; the corrupted original
    # stored the string under ``SCREAMING_SNAKE_CASE__``.
    model_type = "mra"
    SCREAMING_SNAKE_CASE__: List[Any] = "mra"  # kept for backward compatibility

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
# Frozensets of accepted call-argument names per pipeline family.
# NOTE(review): every binding reuses the throwaway name ``_UpperCAmelCase``,
# so only the last one survives at runtime; the comments below give the
# presumed original constant (diffusers pipeline_params style) — confirm.

# presumably TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase : int =frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
# presumably TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase : List[Any] =frozenset(["""prompt""", """negative_prompt"""])
# presumably TEXT_TO_IMAGE_IMAGE_PARAMS (no image inputs)
_UpperCAmelCase : Dict =frozenset([])
# presumably IMAGE_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase : int =frozenset(["""image"""])
# presumably IMAGE_VARIATION_PARAMS
_UpperCAmelCase : Tuple =frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
# presumably IMAGE_VARIATION_BATCH_PARAMS
_UpperCAmelCase : int =frozenset(["""image"""])
# presumably TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_UpperCAmelCase : str =frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
# presumably TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_UpperCAmelCase : int =frozenset(["""prompt""", """image""", """negative_prompt"""])
# presumably TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_UpperCAmelCase : Optional[int] =frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
# presumably TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_UpperCAmelCase : Optional[int] =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
# presumably IMAGE_INPAINTING_PARAMS
_UpperCAmelCase : Optional[Any] =frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
# presumably IMAGE_INPAINTING_BATCH_PARAMS
_UpperCAmelCase : Optional[Any] =frozenset(["""image""", """mask_image"""])
# presumably IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS
_UpperCAmelCase : Union[str, Any] =frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
# presumably IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_UpperCAmelCase : Tuple =frozenset(["""example_image""", """image""", """mask_image"""])
# presumably CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS (+ batch variant below)
_UpperCAmelCase : Any =frozenset(["""class_labels"""])
_UpperCAmelCase : List[Any] =frozenset(["""class_labels"""])
# presumably UNCONDITIONAL_IMAGE_GENERATION_PARAMS (+ batch variant below)
_UpperCAmelCase : int =frozenset(["""batch_size"""])
_UpperCAmelCase : str =frozenset([])
# presumably UNCONDITIONAL_AUDIO_GENERATION_PARAMS (+ batch variant below)
_UpperCAmelCase : str =frozenset(["""batch_size"""])
_UpperCAmelCase : Optional[Any] =frozenset([])
# presumably TEXT_TO_AUDIO_PARAMS
_UpperCAmelCase : Tuple =frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
# presumably TEXT_TO_AUDIO_BATCH_PARAMS
_UpperCAmelCase : Tuple =frozenset(["""prompt""", """negative_prompt"""])
# presumably TOKENS_TO_AUDIO_GENERATION_PARAMS
_UpperCAmelCase : List[str] =frozenset(["""input_tokens"""])
_UpperCAmelCase : Optional[Any] =frozenset(["""input_tokens"""]) | 619 | 0 |
def lowerCAmelCase(num_a, num_b) -> bool:
    """Return True iff ``num_a`` and ``num_b`` have opposite signs.

    Uses the sign bit of the XOR: it is set exactly when the two operands'
    sign bits differ. Fix: the original declared both parameters with the
    same name (a SyntaxError), making the comparison degenerate; the second
    one is renamed.

    >>> lowerCAmelCase(1, -1)
    True
    >>> lowerCAmelCase(-4, -3)
    False
    """
    return (num_a ^ num_b) < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def lowerCAmelCase(lowerCAmelCase_=1_000_000) -> int:
    """Project Euler 14: starting number below ``lowerCAmelCase_`` that
    produces the longest Collatz chain, memoizing chain lengths.

    NOTE(review): local names reconstructed — the original bound every
    initial value to a throwaway name and then read ``counters`` /
    ``largest_number`` / ``pre_counter``, raising NameError.
    """
    largest_number = 1
    pre_counter = 1  # longest chain length seen so far
    counters = {1: 1}  # number -> Collatz chain length (number of terms)
    for input1 in range(2, lowerCAmelCase_):
        counter = 0
        number = input1
        while True:
            if number in counters:
                # Reuse the memoized tail length.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number


# The __main__ guard below (and external callers) use this name.
solution = lowerCAmelCase


if __name__ == "__main__":
    print(solution(int(input().strip())))
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# NOTE(review): restored the canonical lazy-module layout. The corrupted
# original bound the import structure and every backend-conditional sub-list
# to the throwaway name ``_UpperCAmelCase`` while ``_LazyModule`` at the
# bottom reads ``_import_structure``, and it never installed the lazy proxy
# into ``sys.modules``.
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]

if TYPE_CHECKING:
    from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
    from .tokenization_lxmert import LxmertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_lxmert_fast import LxmertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lxmert import (
            LxmertEncoder,
            LxmertForPreTraining,
            LxmertForQuestionAnswering,
            LxmertModel,
            LxmertPreTrainedModel,
            LxmertVisualFeatureEncoder,
            LxmertXLayer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_lxmert import (
            TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLxmertForPreTraining,
            TFLxmertMainLayer,
            TFLxmertModel,
            TFLxmertPreTrainedModel,
            TFLxmertVisualFeatureEncoder,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : str =logging.get_logger(__name__)
class snake_case__(UpperCAmelCase__):
    """Configuration for a composite encoder-decoder model.

    NOTE(review): reconstructed — the corrupted original bound every value to
    a throwaway local (so ``self.encoder`` / ``self.decoder`` were never set,
    yet ``to_dict`` reads them) and defined both public methods under the same
    shadowing name ``lowercase_``. The upstream names
    ``from_encoder_decoder_configs`` / ``to_dict`` are restored; ``to_dict``
    in particular must keep that name to override the base serializer.
    """

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported lazily to avoid a circular import with the auto-config module.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> "PretrainedConfig":
        """Instantiate from two sub-configs, marking the decoder as a decoder with cross-attention."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self) -> Any:
        """Serialize, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class snake_case__:
    """Small from-scratch CNN (one convolution layer, one pooling layer, two
    fully-connected BP layers) trained with hand-written backprop on numpy
    matrices, plus pickle save/load helpers.

    NOTE(review): this copy is machine-corrupted — ``__init__`` declares seven
    identically named ``__lowercase`` parameters (a SyntaxError as written),
    and throughout the class values are bound to throwaway ``lowerCAmelCase_``
    names while later code reads the intended names (``self.conva``,
    ``self.wkj``, ``data_focus`` …). The code is left byte-for-byte as found;
    only comments/docstrings were added.
    """

    def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=0.2 , __lowercase=0.2 ) -> str:
        # NOTE(review): duplicate parameter names above; the bodies below read
        # bp_numa / conva_get / size_pa / rate_w / rate_t, which are never bound.
        lowerCAmelCase_ : Union[str, Any] = bp_numa
        lowerCAmelCase_ : Any = bp_numa
        lowerCAmelCase_ : Dict = bp_numa
        lowerCAmelCase_ : Optional[int] = conva_get[:2]
        lowerCAmelCase_ : Any = conva_get[2]
        lowerCAmelCase_ : Optional[int] = size_pa
        lowerCAmelCase_ : Any = rate_w
        lowerCAmelCase_ : Union[str, Any] = rate_t
        # Random init: conv kernels, FC weights and thresholds in [-0.5, 0.5] / [-1, 1].
        lowerCAmelCase_ : int = [
            np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
            for i in range(self.conva[1] )
        ]
        lowerCAmelCase_ : Optional[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
        lowerCAmelCase_ : Tuple = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
        lowerCAmelCase_ : Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1
        lowerCAmelCase_ : str = -2 * np.random.rand(self.num_bpa ) + 1
        lowerCAmelCase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1

    def lowercase_ ( self , __lowercase ) -> Dict:
        """Serialize all model parameters to ``__lowercase`` with pickle."""
        # save model dict with pickle
        lowerCAmelCase_ : str = {
            '''num_bp1''': self.num_bpa,
            '''num_bp2''': self.num_bpa,
            '''num_bp3''': self.num_bpa,
            '''conv1''': self.conva,
            '''step_conv1''': self.step_conva,
            '''size_pooling1''': self.size_poolinga,
            '''rate_weight''': self.rate_weight,
            '''rate_thre''': self.rate_thre,
            '''w_conv1''': self.w_conva,
            '''wkj''': self.wkj,
            '''vji''': self.vji,
            '''thre_conv1''': self.thre_conva,
            '''thre_bp2''': self.thre_bpa,
            '''thre_bp3''': self.thre_bpa,
        }
        with open(__lowercase , '''wb''' ) as f:
            pickle.dump(__lowercase , __lowercase )
        print(f"""Model saved: {save_path}""" )  # NOTE(review): ``save_path`` is not bound here

    @classmethod
    def lowercase_ ( cls , __lowercase ) -> List[Any]:
        """Rebuild a CNN instance from a pickle file written by the saver above."""
        # read saved model
        with open(__lowercase , '''rb''' ) as f:
            lowerCAmelCase_ : int = pickle.load(__lowercase )  # noqa: S301
        lowerCAmelCase_ : List[Any] = model_dic.get('''conv1''' )
        conv_get.append(model_dic.get('''step_conv1''' ) )
        lowerCAmelCase_ : Optional[Any] = model_dic.get('''size_pooling1''' )
        lowerCAmelCase_ : Tuple = model_dic.get('''num_bp1''' )
        lowerCAmelCase_ : Optional[Any] = model_dic.get('''num_bp2''' )
        lowerCAmelCase_ : Dict = model_dic.get('''num_bp3''' )
        lowerCAmelCase_ : Optional[Any] = model_dic.get('''rate_weight''' )
        lowerCAmelCase_ : Optional[Any] = model_dic.get('''rate_thre''' )
        # create model instance
        lowerCAmelCase_ : Any = CNN(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
        # modify model parameter
        lowerCAmelCase_ : Any = model_dic.get('''w_conv1''' )
        lowerCAmelCase_ : Dict = model_dic.get('''wkj''' )
        lowerCAmelCase_ : str = model_dic.get('''vji''' )
        lowerCAmelCase_ : Optional[Any] = model_dic.get('''thre_conv1''' )
        lowerCAmelCase_ : Any = model_dic.get('''thre_bp2''' )
        lowerCAmelCase_ : List[str] = model_dic.get('''thre_bp3''' )
        return conv_ins

    def lowercase_ ( self , __lowercase ) -> Optional[Any]:
        """Sigmoid activation."""
        return 1 / (1 + np.exp(-1 * x ))  # NOTE(review): reads ``x``, parameter is ``__lowercase``

    def lowercase_ ( self , __lowercase ) -> int:
        """Round to 3 decimal places (used when formatting predictions)."""
        return round(__lowercase , 3 )

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> str:
        """Convolve ``data`` with the given kernels; returns flattened focus windows and feature maps."""
        # convolution process
        lowerCAmelCase_ : int = convs[0]
        lowerCAmelCase_ : Optional[Any] = convs[1]
        lowerCAmelCase_ : List[Any] = np.shape(__lowercase )[0]
        # get the data slice of original image data, data_focus
        lowerCAmelCase_ : Optional[Any] = []
        for i_focus in range(0 , size_data - size_conv + 1 , __lowercase ):
            for j_focus in range(0 , size_data - size_conv + 1 , __lowercase ):
                lowerCAmelCase_ : List[Any] = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(__lowercase )
        # calculate the feature map of every single kernel, and saved as list of matrix
        lowerCAmelCase_ : Dict = []
        lowerCAmelCase_ : List[Any] = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(__lowercase ):
            lowerCAmelCase_ : List[Any] = []
            for i_focus in range(len(__lowercase ) ):
                lowerCAmelCase_ : Dict = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(__lowercase ) )
            lowerCAmelCase_ : Union[str, Any] = np.asmatrix(__lowercase ).reshape(
                __lowercase , __lowercase )
            data_featuremap.append(__lowercase )
        # expanding the data slice to One dimenssion
        lowerCAmelCase_ : int = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(__lowercase ) )
        lowerCAmelCase_ : Tuple = np.asarray(__lowercase )
        return focus_list, data_featuremap

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase="average_pool" ) -> int:
        """Pool each feature map down by ``size_pooling`` (average or max)."""
        # pooling process
        lowerCAmelCase_ : Any = len(featuremaps[0] )
        lowerCAmelCase_ : List[Any] = int(size_map / size_pooling )
        lowerCAmelCase_ : Any = []
        for i_map in range(len(__lowercase ) ):
            lowerCAmelCase_ : str = featuremaps[i_map]
            lowerCAmelCase_ : List[str] = []
            for i_focus in range(0 , __lowercase , __lowercase ):
                for j_focus in range(0 , __lowercase , __lowercase ):
                    lowerCAmelCase_ : Tuple = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(__lowercase ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(__lowercase ) )
            lowerCAmelCase_ : Any = np.asmatrix(__lowercase ).reshape(__lowercase , __lowercase )
            featuremap_pooled.append(__lowercase )
        return featuremap_pooled

    def lowercase_ ( self , __lowercase ) -> List[str]:
        """Flatten a list of 2-D matrices into one 1-D numpy array."""
        # expanding three dimension data to one dimension list
        lowerCAmelCase_ : Dict = []
        for i in range(len(__lowercase ) ):
            lowerCAmelCase_ : List[str] = np.shape(data[i] )
            lowerCAmelCase_ : Optional[int] = data[i].reshape(1 , shapes[0] * shapes[1] )
            lowerCAmelCase_ : Dict = data_listed.getA().tolist()[0]
            data_expanded.extend(__lowercase )
        lowerCAmelCase_ : Dict = np.asarray(__lowercase )
        return data_expanded

    def lowercase_ ( self , __lowercase ) -> int:
        """Flatten a single matrix into a 1-row matrix."""
        # expanding matrix to one dimension list
        lowerCAmelCase_ : str = np.asarray(__lowercase )
        lowerCAmelCase_ : Optional[Any] = np.shape(__lowercase )
        lowerCAmelCase_ : Any = data_mat.reshape(1 , shapes[0] * shapes[1] )
        return data_expanded

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
        """Back-propagate pooled gradients onto each (pre-pooling) feature map."""
        lowerCAmelCase_ : Optional[int] = []
        lowerCAmelCase_ : Any = 0
        for i_map in range(__lowercase ):
            lowerCAmelCase_ : int = np.ones((size_map, size_map) )
            for i in range(0 , __lowercase , __lowercase ):
                for j in range(0 , __lowercase , __lowercase ):
                    lowerCAmelCase_ : Optional[Any] = pd_pool[
                        i_pool
                    ]
                    lowerCAmelCase_ : Union[str, Any] = i_pool + 1
            lowerCAmelCase_ : Optional[int] = np.multiply(
                __lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
            pd_all.append(__lowercase )
        return pd_all

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=bool ) -> Optional[Any]:
        """Train until ``n_repeat`` epochs or the MSE drops below ``error_accuracy``;
        optionally plot the error curve. NOTE(review): duplicate parameter names."""
        # model traning
        print('''----------------------Start Training-------------------------''' )
        print((''' - - Shape: Train_Data ''', np.shape(__lowercase )) )
        print((''' - - Shape: Teach_Data ''', np.shape(__lowercase )) )
        lowerCAmelCase_ : int = 0
        lowerCAmelCase_ : Optional[Any] = []
        lowerCAmelCase_ : Dict = 1_0_0_0_0
        while rp < n_repeat and mse >= error_accuracy:
            lowerCAmelCase_ : Tuple = 0
            print(f"""-------------Learning Time {rp}--------------""" )
            for p in range(len(__lowercase ) ):
                # print('------------Learning Image: %d--------------'%p)
                lowerCAmelCase_ : str = np.asmatrix(datas_train[p] )
                lowerCAmelCase_ : List[Any] = np.asarray(datas_teach[p] )
                # Forward pass: conv -> pool -> flatten -> two sigmoid FC layers.
                lowerCAmelCase_ : Tuple = self.convolute(
                    __lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
                lowerCAmelCase_ : Union[str, Any] = self.pooling(__lowercase , self.size_poolinga )
                lowerCAmelCase_ : List[str] = np.shape(__lowercase )
                lowerCAmelCase_ : List[Any] = self._expand(__lowercase )
                lowerCAmelCase_ : Optional[int] = data_bp_input
                lowerCAmelCase_ : Dict = np.dot(__lowercase , self.vji.T ) - self.thre_bpa
                lowerCAmelCase_ : Union[str, Any] = self.sig(__lowercase )
                lowerCAmelCase_ : int = np.dot(__lowercase , self.wkj.T ) - self.thre_bpa
                lowerCAmelCase_ : int = self.sig(__lowercase )
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                lowerCAmelCase_ : Union[str, Any] = np.multiply(
                    (data_teach - bp_outa) , np.multiply(__lowercase , (1 - bp_outa) ) )
                lowerCAmelCase_ : Tuple = np.multiply(
                    np.dot(__lowercase , self.wkj ) , np.multiply(__lowercase , (1 - bp_outa) ) )
                lowerCAmelCase_ : Any = np.dot(__lowercase , self.vji )
                lowerCAmelCase_ : Tuple = pd_i_all / (self.size_poolinga * self.size_poolinga)
                lowerCAmelCase_ : List[Any] = pd_conva_pooled.T.getA().tolist()
                lowerCAmelCase_ : List[str] = self._calculate_gradient_from_pool(
                    __lowercase , __lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1] ):
                    lowerCAmelCase_ : int = self._expand_mat(pd_conva_all[k_conv] )
                    lowerCAmelCase_ : int = self.rate_weight * np.dot(__lowercase , __lowercase )
                    lowerCAmelCase_ : Optional[Any] = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]) )
                    lowerCAmelCase_ : List[str] = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                lowerCAmelCase_ : Union[str, Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                lowerCAmelCase_ : Dict = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                lowerCAmelCase_ : List[Any] = self.thre_bpa - pd_k_all * self.rate_thre
                lowerCAmelCase_ : Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                lowerCAmelCase_ : Optional[int] = np.sum(abs(data_teach - bp_outa ) )
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            lowerCAmelCase_ : int = rp + 1
            lowerCAmelCase_ : Dict = error_count / patterns
            all_mse.append(__lowercase )

        def draw_error():
            # Plot per-epoch MSE against the target accuracy line.
            lowerCAmelCase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(__lowercase , '''+-''' )
            plt.plot(__lowercase , '''r--''' )
            plt.xlabel('''Learning Times''' )
            plt.ylabel('''All_mse''' )
            plt.grid(__lowercase , alpha=0.5 )
            plt.show()

        print('''------------------Training Complished---------------------''' )
        print((''' - - Training epoch: ''', rp, f""" - - Mse: {mse:.6f}""") )
        if draw_e:
            draw_error()
        return mse

    def lowercase_ ( self , __lowercase ) -> Optional[int]:
        """Run the forward pass on a list of test images and return rounded outputs."""
        # model predict
        lowerCAmelCase_ : Optional[int] = []
        print('''-------------------Start Testing-------------------------''' )
        print((''' - - Shape: Test_Data ''', np.shape(__lowercase )) )
        for p in range(len(__lowercase ) ):
            lowerCAmelCase_ : str = np.asmatrix(datas_test[p] )
            lowerCAmelCase_ : Optional[int] = self.convolute(
                __lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
            lowerCAmelCase_ : Dict = self.pooling(__lowercase , self.size_poolinga )
            lowerCAmelCase_ : Optional[int] = self._expand(__lowercase )
            lowerCAmelCase_ : Tuple = data_bp_input
            lowerCAmelCase_ : Tuple = bp_outa * self.vji.T - self.thre_bpa
            lowerCAmelCase_ : int = self.sig(__lowercase )
            lowerCAmelCase_ : List[Any] = bp_outa * self.wkj.T - self.thre_bpa
            lowerCAmelCase_ : Union[str, Any] = self.sig(__lowercase )
            produce_out.extend(bp_outa.getA().tolist() )
        lowerCAmelCase_ : Optional[Any] = [list(map(self.do_round , __lowercase ) ) for each in produce_out]
        return np.asarray(__lowercase )

    def lowercase_ ( self , __lowercase ) -> int:
        """Return the raw convolution and pooling outputs for one image (for inspection)."""
        # return the data of image after convoluting process so we can check it out
        lowerCAmelCase_ : Optional[Any] = np.asmatrix(__lowercase )
        lowerCAmelCase_ : Optional[int] = self.convolute(
            __lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
        lowerCAmelCase_ : str = self.pooling(__lowercase , self.size_poolinga )
        return data_conveda, data_pooleda
# No demo is run when executed directly. Fix: the original line carried
# trailing dataset-table residue ("| 712 |") after ``pass``, breaking syntax.
if __name__ == "__main__":
    pass
from __future__ import annotations
from math import pi
def lowerCAmelCase(inductance, frequency, reactance) -> dict[str, float]:
    """Solve the inductive-reactance relation X_L = 2*pi*f*L.

    Exactly one of the three arguments must be passed as 0; that quantity is
    computed from the other two and returned in a single-entry dict keyed by
    its name. Negative values are rejected.
    """
    zero_count = (inductance, frequency, reactance).count(0)
    if zero_count != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")

    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    if frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    if reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}

    # Unreachable: exactly one argument is known to be 0 at this point.
    raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import math
import qiskit
def lowerCAmelCase(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    """Simulate a quantum full adder and return the measurement counts of the
    two output bits (sum, carry-out).

    Each input may be 0, 1, or 2; 2 puts the corresponding qubit into
    superposition with a Hadamard gate.

    NOTE(review): reconstructed — the original declared three identically
    named default parameters (a SyntaxError) and degenerate
    ``isinstance(x, x)`` type checks; the upstream implementation rejects
    string inputs, which is restored here.
    """
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)


# The __main__ guard below (and external callers) use this name.
quantum_full_adder = lowerCAmelCase


if __name__ == "__main__":
    print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
    """Names of the learning-rate schedules built by the factory functions below.

    NOTE(review): every member is declared under the same shadowing name
    ``SCREAMING_SNAKE_CASE__`` (the original presumably used distinct enum
    members LINEAR, COSINE, COSINE_WITH_RESTARTS, POLYNOMIAL, CONSTANT,
    CONSTANT_WITH_WARMUP, PIECEWISE_CONSTANT), so only the last binding
    survives at runtime. Left byte-for-byte as found.
    """

    SCREAMING_SNAKE_CASE__ : List[str] = """linear"""
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = """cosine"""
    SCREAMING_SNAKE_CASE__ : Dict = """cosine_with_restarts"""
    SCREAMING_SNAKE_CASE__ : List[str] = """polynomial"""
    SCREAMING_SNAKE_CASE__ : Dict = """constant"""
    SCREAMING_SNAKE_CASE__ : List[str] = """constant_with_warmup"""
    SCREAMING_SNAKE_CASE__ : str = """piecewise_constant"""
def lowerCAmelCase(optimizer, last_epoch=-1) -> LambdaLR:
    """Schedule with a constant learning rate (multiplier fixed at 1).

    Fix: the original declared both parameters with the same name
    (a SyntaxError); names are restored from how they are passed to LambdaLR.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def lowerCAmelCase(optimizer, num_warmup_steps, last_epoch=-1) -> LambdaLR:
    """Constant learning rate after a linear warmup over ``num_warmup_steps``.

    Fix: the original declared three identically named parameters and the
    closure read undefined names (``current_step`` / ``num_warmup_steps``).
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            # Ramp linearly from 0 to 1 during warmup.
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def lowerCAmelCase(optimizer, step_rules, last_epoch=-1) -> LambdaLR:
    """Piecewise-constant LR multiplier schedule.

    ``step_rules`` looks like ``"1:10,0.1:20,0.01"``: multiplier 1 before
    step 10, 0.1 before step 20, then 0.01 for all later steps.

    Fix: the original declared duplicate parameter names, dropped the
    ``rules_dict[steps] = value`` binding (it read undefined ``value``), and
    the inner function read an unbound ``sorted_steps``.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            # Past the last boundary: use the trailing multiplier.
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def lowerCAmelCase(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1) -> LambdaLR:
    """Linear warmup to the base LR, then linear decay to 0 at
    ``num_training_steps``.

    Fix: the original declared four identically named parameters and the
    closure read undefined names.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Decay linearly; clamp at 0 past the end of training.
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def lowerCAmelCase(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1) -> LambdaLR:
    """Linear warmup, then cosine decay over the remaining steps
    (``num_cycles=0.5`` gives a single half-cosine from 1 down to 0).

    Fix: the original declared five identically named parameters and the
    closure read undefined names.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def lowerCAmelCase(optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1) -> LambdaLR:
    """
    Create a schedule with linear warmup followed by cosine decay with
    several hard restarts (the multiplier jumps back to 1 at each restart).

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        num_warmup_steps: Number of linear warmup steps.
        num_training_steps: Total number of training steps.
        num_cycles: Number of hard restarts.
        last_epoch: Index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    # NOTE(review): original parameters shared one obfuscated name (SyntaxError)
    # and the inner lambda referenced undefined names; restored here.

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # The modulo restarts the cosine wave num_cycles times over training.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def lowerCAmelCase(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1) -> LambdaLR:
    """
    Create a schedule with linear warmup followed by a polynomial decay
    from the optimizer's base lr down to `lr_end`.

    Args:
        optimizer: The optimizer whose learning rate is scheduled; its
            default ``lr`` is used as the initial learning rate.
        num_warmup_steps: Number of linear warmup steps.
        num_training_steps: Total number of training steps.
        lr_end: Final learning rate after the decay.
        power: Exponent of the polynomial decay (1.0 = linear).
        last_epoch: Index of the last epoch when resuming training.

    Raises:
        ValueError: If ``lr_end`` is not strictly smaller than the initial lr.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    # NOTE(review): original parameters shared one obfuscated name (SyntaxError)
    # and the inner lambda referenced undefined names; restored here.
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Maps each SchedulerType member to the factory function that builds it;
# consumed by the generic get_scheduler entry point below.
# NOTE(review): an automated rewrite renamed every factory above to
# `lowerCAmelCase` and binds this dict to `_UpperCAmelCase`, so the
# `get_*_schedule*` names referenced here do not resolve in this file as-is.
_UpperCAmelCase : Union[str, Any] ={
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCAmelCase(
    name,
    optimizer,
    step_rules=None,
    num_warmup_steps=None,
    num_training_steps=None,
    num_cycles=1,
    power=1.0,
    last_epoch=-1,
):
    """
    Unified entry point: build any scheduler from its `SchedulerType` name.

    Args:
        name: A `SchedulerType` or its string value.
        optimizer: The optimizer whose learning rate is scheduled.
        step_rules: Rule string, only for PIECEWISE_CONSTANT.
        num_warmup_steps: Required by every warmup-based schedule.
        num_training_steps: Required by the decaying schedules.
        num_cycles: Cosine-cycle count for COSINE_WITH_RESTARTS.
        power: Polynomial power for POLYNOMIAL.
        last_epoch: Index of the last epoch when resuming training.

    Raises:
        ValueError: If a required argument for the chosen schedule is missing.
    """
    # NOTE(review): original parameters shared one obfuscated name (SyntaxError);
    # names restored. TYPE_TO_SCHEDULER_FUNCTION is expected at module level.
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """Fast (CPU-sized) tests for StableDiffusionSAGPipeline built from tiny dummy components.

    NOTE(review): an automated rewrite garbled identifiers in this class — the
    base mixins are both `UpperCAmelCase__` (undefined; presumably
    PipelineLatentTesterMixin / PipelineTesterMixin — confirm), the six class
    attributes all rebind `SCREAMING_SNAKE_CASE__` so only the last survives,
    and `get_dummy_inputs` has two parameters with the same name (SyntaxError).
    Code left byte-identical; only documentation added.
    """
    SCREAMING_SNAKE_CASE__ : Any = StableDiffusionSAGPipeline
    SCREAMING_SNAKE_CASE__ : Optional[Any] = TEXT_TO_IMAGE_PARAMS
    SCREAMING_SNAKE_CASE__ : str = TEXT_TO_IMAGE_BATCH_PARAMS
    SCREAMING_SNAKE_CASE__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
    SCREAMING_SNAKE_CASE__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
    SCREAMING_SNAKE_CASE__ : Optional[int] = False
    def lowercase_ ( self ) -> Union[str, Any]:
        # Build the minimal component set (unet/scheduler/vae/text encoder/tokenizer)
        # with fixed seeds so every test run constructs identical weights.
        torch.manual_seed(0 )
        lowerCAmelCase_ : Dict = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
        lowerCAmelCase_ : List[Any] = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__lowercase , set_alpha_to_one=__lowercase , )
        torch.manual_seed(0 )
        lowerCAmelCase_ : List[Any] = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        lowerCAmelCase_ : int = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        lowerCAmelCase_ : Tuple = CLIPTextModel(__lowercase )
        lowerCAmelCase_ : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        lowerCAmelCase_ : Any = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def lowercase_ ( self , __lowercase , __lowercase=0 ) -> Tuple:
        # Deterministic pipeline kwargs; MPS needs a CPU-seeded generator because
        # torch.Generator(device="mps") is not supported.
        if str(__lowercase ).startswith('''mps''' ):
            lowerCAmelCase_ : List[str] = torch.manual_seed(__lowercase )
        else:
            lowerCAmelCase_ : List[str] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
        lowerCAmelCase_ : Any = {
            '''prompt''': '''.''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 1.0,
            '''sag_scale''': 1.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def lowercase_ ( self ) -> Union[str, Any]:
        # Loosened tolerance vs the mixin default for the SAG pipeline.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class snake_case__( unittest.TestCase ):
    """Slow GPU integration tests for StableDiffusionSAGPipeline against real checkpoints.

    NOTE(review): method names were garbled to `lowercase_` by an automated
    rewrite, so only the last definition survives at runtime (the first one is
    presumably `tearDown` — confirm against upstream). Code left byte-identical.
    """
    def lowercase_ ( self ) -> Any:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowercase_ ( self ) -> int:
        # SD 1.4 checkpoint: compare a 3x3 corner slice against reference values.
        lowerCAmelCase_ : List[str] = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        lowerCAmelCase_ : Optional[int] = sag_pipe.to(__lowercase )
        sag_pipe.set_progress_bar_config(disable=__lowercase )
        lowerCAmelCase_ : List[Any] = '''.'''
        lowerCAmelCase_ : Optional[Any] = torch.manual_seed(0 )
        lowerCAmelCase_ : List[Any] = sag_pipe(
            [prompt] , generator=__lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='''np''' )
        lowerCAmelCase_ : List[str] = output.images
        lowerCAmelCase_ : Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        lowerCAmelCase_ : Any = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
    def lowercase_ ( self ) -> Dict:
        # SD 2.1-base checkpoint: same slice comparison with its own reference values.
        lowerCAmelCase_ : List[str] = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        lowerCAmelCase_ : int = sag_pipe.to(__lowercase )
        sag_pipe.set_progress_bar_config(disable=__lowercase )
        lowerCAmelCase_ : Any = '''.'''
        lowerCAmelCase_ : str = torch.manual_seed(0 )
        lowerCAmelCase_ : Dict = sag_pipe(
            [prompt] , generator=__lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='''np''' )
        lowerCAmelCase_ : Optional[int] = output.images
        lowerCAmelCase_ : str = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        lowerCAmelCase_ : Tuple = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
    def lowercase_ ( self ) -> str:
        # Non-square (768x512) generation: only the output shape is checked here.
        lowerCAmelCase_ : Tuple = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        lowerCAmelCase_ : Tuple = sag_pipe.to(__lowercase )
        sag_pipe.set_progress_bar_config(disable=__lowercase )
        lowerCAmelCase_ : Any = '''.'''
        lowerCAmelCase_ : Dict = torch.manual_seed(0 )
        lowerCAmelCase_ : Dict = sag_pipe(
            [prompt] , width=7_6_8 , height=5_1_2 , generator=__lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='''np''' , )
        lowerCAmelCase_ : Tuple = output.images
        assert image.shape == (1, 5_1_2, 7_6_8, 3)
from __future__ import annotations
def lowerCAmelCase(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
    """
    Solve the mass-action law ``n * p = n_i**2`` of a semiconductor for the
    one quantity given as 0, from the other two.

    Args:
        electron_conc: Electron concentration ``n`` (0 = unknown).
        hole_conc: Hole concentration ``p`` (0 = unknown).
        intrinsic_conc: Intrinsic concentration ``n_i`` (0 = unknown).

    Returns:
        ``(name, value)`` — the name of the solved quantity and its value.

    Raises:
        ValueError: If not exactly one value is 0, or any value is negative.
    """
    # NOTE(review): the original signature used one obfuscated name for all
    # three parameters (a SyntaxError); names restored from the body.
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        # Unreachable given the count(0) guard above; kept for safety.
        return (-1, -1)
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
# Algorithm for the pigeonhole sorting
def lowerCAmelCase(a) -> None:
    """
    Sort the list of integers `a` in place using pigeonhole sort.

    Suited to dense integer ranges: O(n + range) time, O(range) extra space.

    Args:
        a: Mutable sequence of ints; sorted ascending in place.

    Raises:
        AssertionError: If any element is not an int.
    """
    # NOTE(review): the original body iterated an undefined name `a` and wrote
    # the result into a throwaway local instead of back into the list; the
    # parameter is renamed `a` and the write-back restored.
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def lowerCAmelCase() -> None:
    """Demo entry point: sort a sample list and print it.

    NOTE(review): `pigeonhole_sort` is the upstream name of the sort above,
    which an automated rewrite renamed to `lowerCAmelCase` — confirm which
    name is live before running.
    """
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    # str() each element: " ".join on ints raises TypeError (original bug).
    print("Sorted order is:", " ".join(str(x) for x in a))
# Script entry point.
# NOTE(review): `main` is not defined under that name in this file — the demo
# function above was renamed to `lowerCAmelCase` by an automated rewrite.
if __name__ == "__main__":
    main()
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# Module-level constants for the checkpoint-docstring check.
# NOTE(review): an automated rewrite bound every constant to the same name
# `_UpperCAmelCase`, so the names the functions below rely on
# (PATH_TO_TRANSFORMERS, CONFIG_MAPPING, _re_checkpoint,
# CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK) are undefined as-is.
_UpperCAmelCase : Any ="""src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_UpperCAmelCase : Optional[Any] =direct_transformers_import(PATH_TO_TRANSFORMERS)
_UpperCAmelCase : List[str] =transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_UpperCAmelCase : Dict =re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
# Config classes exempt from the "docstring must mention a checkpoint" rule.
_UpperCAmelCase : Any ={
    """DecisionTransformerConfig""",
    """EncoderDecoderConfig""",
    """MusicgenConfig""",
    """RagConfig""",
    """SpeechEncoderDecoderConfig""",
    """TimmBackboneConfig""",
    """VisionEncoderDecoderConfig""",
    """VisionTextDualEncoderConfig""",
    """LlamaConfig""",
}
def lowerCAmelCase(config_class) -> str:
    """
    Find a checkpoint name mentioned in `config_class`'s docstring whose
    markdown link actually points at ``https://huggingface.co/<name>``.

    Args:
        config_class: A transformers configuration class.

    Returns:
        The first self-consistent checkpoint name, or None if none is found.
    """
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    # NOTE(review): `_re_checkpoint` must exist at module level; in this file
    # it was rebound to `_UpperCAmelCase` by an automated rewrite.
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def lowerCAmelCase() -> None:
    """
    Verify that every non-deprecated config class in CONFIG_MAPPING mentions
    a valid checkpoint in its docstring (unless explicitly exempted).

    Raises:
        ValueError: Listing every configuration without a valid checkpoint.
    """
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            # Original garbled code appended the checkpoint variable here;
            # the class *name* is what the error message needs.
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
# Script entry point.
# NOTE(review): `check_config_docstrings_have_checkpoints` is not defined under
# that name in this file — the function above was renamed to `lowerCAmelCase`
# by an automated rewrite.
if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the conditional_detr model package.
# NOTE(review): an automated rewrite bound every piece to `_UpperCAmelCase`,
# so `_import_structure` was never defined and the `_LazyModule` was never
# installed into sys.modules; both restored here.
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

# Vision-only members are registered only when the vision extras are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

# Torch-only members are registered only when PyTorch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the lazy module
    # below resolves attributes on first access.
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 716 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState

# Silence PyTorch's "scheduler.step() before optimizer.step()" UserWarning:
# the wrapper below intentionally controls the step order itself.
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class snake_case__:
    """
    Wrapper around a learning-rate scheduler that only steps when the wrapped
    optimizer(s) actually stepped (e.g. not skipped by a grad scaler), and
    that steps once per process when batches are not split across processes.

    Args:
        scheduler: The wrapped `torch.optim.lr_scheduler` instance.
        optimizers: One optimizer or a list/tuple of optimizers to watch.
        step_with_optimizer: If False, `step()` forwards unconditionally.
        split_batches: If True, the dataloader batch is split across processes,
            so a single scheduler step per training step is correct.
    """

    # NOTE(review): the garbled original gave every __init__ parameter the same
    # name (SyntaxError) and named all six methods `lowercase_` so only the
    # last survived; names restored from the bodies/comments.
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                # Keep the internal counter in sync even though no real step happens.
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler.
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
# Module logger and the map of pretrained Marian config URLs.
# NOTE(review): both constants are bound to the same garbled name
# `_UpperCAmelCase`, so the archive map shadows the logger as written.
_UpperCAmelCase : Optional[int] =logging.get_logger(__name__)
_UpperCAmelCase : Any ={
    """Helsinki-NLP/opus-mt-en-de""": """https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json""",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class snake_case__( PretrainedConfig ):
    """
    Configuration for Marian machine-translation models. Defaults match
    Helsinki-NLP/opus-mt-en-de.

    NOTE(review): the garbled original named all 25 __init__ parameters
    `__lowercase` (a SyntaxError) and collapsed the three class attributes onto
    one name; parameter names are restored in the order of the original
    default values (58101, None, 1024, 12, 4096, 16, ...).
    """

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Falls back to the encoder vocab when no separate decoder vocab is given.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class snake_case__( OnnxSeqaSeqConfigWithPast ):
    """
    ONNX export configuration for Marian seq2seq models (input/output axis
    specs and dummy-input generation, with optional past-key-value caching).

    NOTE(review): the garbled original named every public method `lowercase_`
    (so only the last survived) and gave duplicate `__lowercase` parameter
    names (SyntaxError). Method names are restored from the in-class call
    sites (`_generate_dummy_inputs_for_default_and_seqaseq_lm`, etc.) and the
    ONNX-config override contract (`inputs`, `outputs`,
    `generate_dummy_inputs`, `_flatten_past_key_values_`,
    `atol_for_validation`).
    """

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported model's inputs, per task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                # With a cache, the decoder consumes one new token at a time.
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported model's outputs, per task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            # Bypass the seq2seq parent so the causal-lm present.* entries
            # are added manually below.
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seqaseq_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        """Build encoder+decoder dummy inputs, plus zeroed past_key_values if cached."""
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs: a single token when a cache is used.
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            # Extend the decoder mask to cover the (zeroed) past positions.
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        """Build decoder-only dummy inputs, plus zeroed past_key_values if cached."""
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        """Tokenize a dummy batch with ONNX-friendly fixed axis sizes."""
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation based on the export task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        """Flatten a past_key_values entry using the task-appropriate layout."""
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model's outputs.
        return 1e-4
from manim import *
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 )
lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowercase )
lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 )
lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
gpu.move_to([-1, -1, 0] )
self.add(__lowercase )
lowerCAmelCase_ : str = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 )
lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
model.move_to([3, -1.0, 0] )
self.add(__lowercase )
lowerCAmelCase_ : int = []
lowerCAmelCase_ : int = []
lowerCAmelCase_ : Dict = []
for i, rect in enumerate(__lowercase ):
rect.set_stroke(__lowercase )
lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
self.add(__lowercase )
model_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase , *__lowercase )
lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 )
lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__lowercase )
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Dict = []
for i, rect in enumerate(__lowercase ):
lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 )
target.move_to(__lowercase )
ckpt_arr.append(__lowercase )
lowerCAmelCase_ : Union[str, Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase )
lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase_ : str = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowercase , __lowercase )
lowerCAmelCase_ : str = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowercase )
lowerCAmelCase_ : str = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 )
lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
lowerCAmelCase_ : int = []
for i, rect in enumerate(__lowercase ):
lowerCAmelCase_ : int = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
self.play(*__lowercase )
self.play(FadeOut(__lowercase ) )
lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase , run_time=3 ) )
self.play(
FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
self.wait() | 619 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_UpperCAmelCase : Tuple ={"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] =["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : str =[
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Tuple =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 718 |
_UpperCAmelCase : Dict =[
(1000, """M"""),
(900, """CM"""),
(500, """D"""),
(400, """CD"""),
(100, """C"""),
(90, """XC"""),
(50, """L"""),
(40, """XL"""),
(10, """X"""),
(9, """IX"""),
(5, """V"""),
(4, """IV"""),
(1, """I"""),
]
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
lowerCAmelCase_ : Any = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000}
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : List[str] = 0
while place < len(lowerCAmelCase_ ):
if (place + 1 < len(lowerCAmelCase_ )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
lowerCAmelCase_ : List[Any] = []
for arabic, roman in ROMAN:
((lowerCAmelCase_) , (lowerCAmelCase_)) : Optional[int] = divmod(lowerCAmelCase_ , lowerCAmelCase_ )
result.append(roman * factor )
if number == 0:
break
return "".join(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCAmelCase : Any =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ["""pixel_values"""]
def __init__( self , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 2_5_5 , __lowercase = True , __lowercase = None , __lowercase = None , **__lowercase , ) -> None:
super().__init__(**__lowercase )
lowerCAmelCase_ : Dict = size if size is not None else {'''shortest_edge''': 3_8_4}
lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
lowerCAmelCase_ : List[Any] = do_resize
lowerCAmelCase_ : Optional[int] = size
# Default value set here for backwards compatibility where the value in config is None
lowerCAmelCase_ : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowerCAmelCase_ : Tuple = resample
lowerCAmelCase_ : Optional[int] = do_rescale
lowerCAmelCase_ : Any = rescale_factor
lowerCAmelCase_ : List[str] = do_normalize
lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ) -> np.ndarray:
lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
if "shortest_edge" not in size:
raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
lowerCAmelCase_ : Optional[int] = size['''shortest_edge''']
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCAmelCase_ : Optional[Any] = int(shortest_edge / crop_pct )
lowerCAmelCase_ : List[str] = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase )
lowerCAmelCase_ : List[Any] = resize(image=__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__lowercase , size=(shortest_edge, shortest_edge) , data_format=__lowercase , **__lowercase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__lowercase , size=(shortest_edge, shortest_edge) , resample=__lowercase , data_format=__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> Any:
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image:
lowerCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ : Any = crop_pct if crop_pct is not None else self.crop_pct
lowerCAmelCase_ : str = resample if resample is not None else self.resample
lowerCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ : int = image_std if image_std is not None else self.image_std
lowerCAmelCase_ : int = size if size is not None else self.size
lowerCAmelCase_ : List[str] = get_size_dict(__lowercase , default_to_square=__lowercase )
lowerCAmelCase_ : Tuple = make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowerCAmelCase_ : Optional[Any] = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
lowerCAmelCase_ : Union[str, Any] = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images]
if do_rescale:
lowerCAmelCase_ : Any = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
lowerCAmelCase_ : List[Any] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
lowerCAmelCase_ : Optional[Any] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
lowerCAmelCase_ : Dict = {'''pixel_values''': images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase ) | 719 |
import csv
import tweepy
# Twitter API credentials
_UpperCAmelCase : int =""""""
_UpperCAmelCase : Optional[int] =""""""
_UpperCAmelCase : Dict =""""""
_UpperCAmelCase : str =""""""
def lowerCAmelCase ( lowerCAmelCase_ )-> None:
# authorize twitter, initialize tweepy
lowerCAmelCase_ : Optional[int] = tweepy.OAuthHandler(lowerCAmelCase_ , lowerCAmelCase_ )
auth.set_access_token(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : Any = tweepy.API(lowerCAmelCase_ )
# initialize a list to hold all the tweepy Tweets
lowerCAmelCase_ : Dict = []
# make initial request for most recent tweets (200 is the maximum allowed count)
lowerCAmelCase_ : Optional[int] = api.user_timeline(screen_name=lowerCAmelCase_ , count=200 )
# save most recent tweets
alltweets.extend(lowerCAmelCase_ )
# save the id of the oldest tweet less one
lowerCAmelCase_ : str = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase_ ) > 0:
print(f"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
lowerCAmelCase_ : Optional[Any] = api.user_timeline(
screen_name=lowerCAmelCase_ , count=200 , max_id=lowerCAmelCase_ )
# save most recent tweets
alltweets.extend(lowerCAmelCase_ )
# update the id of the oldest tweet less one
lowerCAmelCase_ : Optional[Any] = alltweets[-1].id - 1
print(f"""...{len(lowerCAmelCase_ )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
lowerCAmelCase_ : Union[str, Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f"""new_{screen_name}_tweets.csv""" , '''w''' ) as f:
lowerCAmelCase_ : Optional[int] = csv.writer(lowerCAmelCase_ )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(lowerCAmelCase_ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""") | 619 | 0 |
_UpperCAmelCase : str =tuple[float, float, float]
_UpperCAmelCase : Any =tuple[float, float, float]
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Vectorad:
lowerCAmelCase_ : Optional[Any] = end_pointa[0] - end_pointa[0]
lowerCAmelCase_ : str = end_pointa[1] - end_pointa[1]
lowerCAmelCase_ : List[Any] = end_pointa[2] - end_pointa[2]
return (x, y, z)
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Vectorad:
lowerCAmelCase_ : Tuple = ab[1] * ac[2] - ab[2] * ac[1] # *i
lowerCAmelCase_ : Dict = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
lowerCAmelCase_ : List[str] = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> bool:
return tuple(round(lowerCAmelCase_ , lowerCAmelCase_ ) for x in vector ) == (0, 0, 0)
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10 )-> bool:
lowerCAmelCase_ : Optional[Any] = create_vector(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : Union[str, Any] = create_vector(lowerCAmelCase_ , lowerCAmelCase_ )
return is_zero_vector(get_ad_vectors_cross(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ ) | 720 |
from math import sqrt
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
lowerCAmelCase_ : str = True
# 0 and 1 are none primes.
if number <= 1:
lowerCAmelCase_ : List[Any] = False
for divisor in range(2 , int(round(sqrt(lowerCAmelCase_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowerCAmelCase_ : Any = False
break
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'status' must been from type bool"
return status
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowerCAmelCase_ : Optional[Any] = list(range(2 , n + 1 ) )
lowerCAmelCase_ : List[Any] = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCAmelCase_ ) ):
for j in range(i + 1 , len(lowerCAmelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowerCAmelCase_ : Tuple = 0
# filters actual prime numbers.
lowerCAmelCase_ : List[str] = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2"
lowerCAmelCase_ : List[str] = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCAmelCase_ ):
ans.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[Any]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and number >= 0, "'number' must been an int and >= 0"
lowerCAmelCase_ : Union[str, Any] = [] # this list will be returns of the function.
# potential prime number factors.
lowerCAmelCase_ : Any = 2
lowerCAmelCase_ : List[str] = number
if number == 0 or number == 1:
ans.append(lowerCAmelCase_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCAmelCase_ ):
while quotient != 1:
if is_prime(lowerCAmelCase_ ) and (quotient % factor == 0):
ans.append(lowerCAmelCase_ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase_ : Union[str, Any] = 0
# prime factorization of 'number'
lowerCAmelCase_ : Optional[int] = prime_factorization(lowerCAmelCase_ )
lowerCAmelCase_ : Dict = max(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type int"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase_ : Union[str, Any] = 0
# prime factorization of 'number'
lowerCAmelCase_ : int = prime_factorization(lowerCAmelCase_ )
lowerCAmelCase_ : Union[str, Any] = min(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type int"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Dict:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowerCAmelCase_ ), "compare bust been from type bool"
return number % 2 == 0
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowerCAmelCase_ ), "compare bust been from type bool"
return number % 2 != 0
def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]:
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (number > 2) and is_even(lowerCAmelCase_ )
), "'number' must been an int, even and > 2"
lowerCAmelCase_ : Union[str, Any] = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowerCAmelCase_ : List[Any] = get_prime_numbers(lowerCAmelCase_ )
lowerCAmelCase_ : Any = len(lowerCAmelCase_ )
# run variable for while-loops.
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : List[Any] = None
# exit variable. for break up the loops
lowerCAmelCase_ : int = True
while i < len_pn and loop:
lowerCAmelCase_ : Tuple = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowerCAmelCase_ : Union[str, Any] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (len(lowerCAmelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]:
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase_ : List[str] = 0
while numbera != 0:
lowerCAmelCase_ : int = numbera % numbera
lowerCAmelCase_ : Union[str, Any] = numbera
lowerCAmelCase_ : Tuple = rest
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any:
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase_ : Dict = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowerCAmelCase_ : Tuple = prime_factorization(lowerCAmelCase_ )
lowerCAmelCase_ : str = prime_factorization(lowerCAmelCase_ )
elif numbera == 1 or numbera == 1:
lowerCAmelCase_ : List[Any] = []
lowerCAmelCase_ : List[Any] = []
lowerCAmelCase_ : str = max(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : str = 0
lowerCAmelCase_ : List[str] = 0
lowerCAmelCase_ : Any = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowerCAmelCase_ : str = prime_fac_a.count(lowerCAmelCase_ )
lowerCAmelCase_ : List[str] = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(max(lowerCAmelCase_ , lowerCAmelCase_ ) ):
ans *= n
else:
lowerCAmelCase_ : Dict = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ ):
ans *= n
done.append(lowerCAmelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowerCAmelCase_ : Optional[int] = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ ):
ans *= n
done.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Dict:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'number' must been a positive int"
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : Optional[int] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCAmelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and is_prime(
lowerCAmelCase_ ), "'ans' must been a prime number and from type int"
return ans
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Dict:
assert (
is_prime(lowerCAmelCase_ ) and is_prime(lowerCAmelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowerCAmelCase_ : str = p_number_a + 1 # jump to the next number
lowerCAmelCase_ : Dict = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCAmelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCAmelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCAmelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCAmelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 1), "'n' must been int and >= 1"
lowerCAmelCase_ : List[str] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCAmelCase_ )
# precondition
assert ans[0] == 1 and ans[len(lowerCAmelCase_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
lowerCAmelCase_ : Tuple = get_divisors(lowerCAmelCase_ )
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCAmelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any:
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowerCAmelCase_ : List[str] = gcd(abs(lowerCAmelCase_ ) , abs(lowerCAmelCase_ ) )
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
lowerCAmelCase_ : Tuple = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Tuple:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
lowerCAmelCase_ : Tuple = 0
lowerCAmelCase_ : Optional[Any] = 1
lowerCAmelCase_ : Tuple = 1 # this will be return
for _ in range(n - 1 ):
lowerCAmelCase_ : Any = ans
ans += fiba
lowerCAmelCase_ : Dict = tmp
return ans | 619 | 0 |
import cva
import numpy as np
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase ) -> List[Any]:
if k in (0.04, 0.06):
lowerCAmelCase_ : Tuple = k
lowerCAmelCase_ : List[str] = window_size
else:
raise ValueError('''invalid k value''' )
def __str__( self ) -> str:
return str(self.k )
def lowercase_ ( self , __lowercase ) -> tuple[cva.Mat, list[list[int]]]:
lowerCAmelCase_ : List[Any] = cva.imread(__lowercase , 0 )
lowerCAmelCase_ : Dict = img.shape
lowerCAmelCase_ : list[list[int]] = []
lowerCAmelCase_ : Optional[int] = img.copy()
lowerCAmelCase_ : int = cva.cvtColor(__lowercase , cva.COLOR_GRAY2RGB )
lowerCAmelCase_ : Tuple = np.gradient(__lowercase )
lowerCAmelCase_ : Optional[int] = dx**2
lowerCAmelCase_ : List[Any] = dy**2
lowerCAmelCase_ : Optional[Any] = dx * dy
lowerCAmelCase_ : Optional[Any] = 0.04
lowerCAmelCase_ : int = self.window_size // 2
for y in range(__lowercase , h - offset ):
for x in range(__lowercase , w - offset ):
lowerCAmelCase_ : Union[str, Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCAmelCase_ : Tuple = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCAmelCase_ : List[str] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCAmelCase_ : List[str] = (wxx * wyy) - (wxy**2)
lowerCAmelCase_ : Any = wxx + wyy
lowerCAmelCase_ : Dict = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
_UpperCAmelCase : Optional[int] =HarrisCorner(0.04, 3)
_UpperCAmelCase : int =edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img) | 721 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_UpperCAmelCase : Tuple =10
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int:
for i in range(lowerCAmelCase_ , lowerCAmelCase_ ):
if array[i] == target:
return i
return -1
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> int:
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : int = len(lowerCAmelCase_ )
while left <= right:
if right - left < precision:
return lin_search(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : List[Any] = (left + right) // 3 + 1
lowerCAmelCase_ : List[str] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
lowerCAmelCase_ : Dict = one_third - 1
elif array[two_third] < target:
lowerCAmelCase_ : List[Any] = two_third + 1
else:
lowerCAmelCase_ : Union[str, Any] = one_third + 1
lowerCAmelCase_ : Tuple = two_third - 1
else:
return -1
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int:
if left < right:
if right - left < precision:
return lin_search(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : Union[str, Any] = (left + right) // 3 + 1
lowerCAmelCase_ : Optional[int] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(lowerCAmelCase_ , one_third - 1 , lowerCAmelCase_ , lowerCAmelCase_ )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , lowerCAmelCase_ , lowerCAmelCase_ )
else:
return -1
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    # Ternary search requires sorted input; fail loudly otherwise.
    # A raise is used instead of ``assert`` so the check survives -O.
    if collection != sorted(collection):
        raise ValueError(f"List must be ordered.\n{collection}.")
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at positions: {result_ite}")
        print(f"Recursive search: {target} found at positions: {result_rec}")
    else:
        print("Not found")
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __snake_case ( lowerCAmelCase__ ):
__lowerCAmelCase : Optional[int] = ''
__lowerCAmelCase : str = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , _A = None , _A = None , **_A , ):
super().__init__(self , **_A)
SCREAMING_SNAKE_CASE_ = repo_info
SCREAMING_SNAKE_CASE_ = token
SCREAMING_SNAKE_CASE_ = None
def lowerCAmelCase__ ( self):
if self.dir_cache is None:
SCREAMING_SNAKE_CASE_ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
SCREAMING_SNAKE_CASE_ = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(_A): {'name': str(_A), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
})
def lowerCAmelCase__ ( self , _A , _A = "rb" , **_A , ):
if not isinstance(self.repo_info , _A):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""")
SCREAMING_SNAKE_CASE_ = hf_hub_url(self.repo_info.id , _A , revision=self.repo_info.sha)
return fsspec.open(
_A , mode=_A , headers=get_authentication_headers_for_url(_A , use_auth_token=self.token) , client_kwargs={'trust_env': True} , ).open()
def lowerCAmelCase__ ( self , _A , **_A):
self._get_dirs()
SCREAMING_SNAKE_CASE_ = self._strip_protocol(_A)
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_A)
def lowerCAmelCase__ ( self , _A , _A=False , **_A):
self._get_dirs()
SCREAMING_SNAKE_CASE_ = PurePosixPath(path.strip('/'))
SCREAMING_SNAKE_CASE_ = {}
for p, f in self.dir_cache.items():
SCREAMING_SNAKE_CASE_ = PurePosixPath(p.strip('/'))
SCREAMING_SNAKE_CASE_ = p.parent
if root == path:
SCREAMING_SNAKE_CASE_ = f
SCREAMING_SNAKE_CASE_ = list(paths.values())
if detail:
return out
else:
return sorted(f['name'] for f in out)
| 620 |
from typing import List
import numpy as np
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {key: len(_SCREAMING_SNAKE_CASE ) for key, value in gen_kwargs.items() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'Sharding is ambiguous for this dataset: '
+ 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
+ '\n'.join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
+ '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
+ 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
) )
SCREAMING_SNAKE_CASE_ = max(lists_lengths.values() , default=0 )
return max(1 , _SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = []
for group_idx in range(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
SCREAMING_SNAKE_CASE_ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
SCREAMING_SNAKE_CASE_ = range(_SCREAMING_SNAKE_CASE , start + num_shards_to_add )
shards_indices_per_group.append(_SCREAMING_SNAKE_CASE )
return shards_indices_per_group
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = _number_of_shards_in_gen_kwargs(_SCREAMING_SNAKE_CASE )
if num_shards == 1:
return [dict(_SCREAMING_SNAKE_CASE )]
else:
SCREAMING_SNAKE_CASE_ = _distribute_shards(num_shards=_SCREAMING_SNAKE_CASE , max_num_jobs=_SCREAMING_SNAKE_CASE )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(_SCREAMING_SNAKE_CASE ) )
]
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[dict] ):
"""simple docstring"""
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , _SCREAMING_SNAKE_CASE )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : np.random.Generator , _SCREAMING_SNAKE_CASE : dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {len(_SCREAMING_SNAKE_CASE ) for value in gen_kwargs.values() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
SCREAMING_SNAKE_CASE_ = {}
for size in list_sizes:
SCREAMING_SNAKE_CASE_ = list(range(_SCREAMING_SNAKE_CASE ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
SCREAMING_SNAKE_CASE_ = dict(_SCREAMING_SNAKE_CASE )
for key, value in shuffled_kwargs.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ = [value[i] for i in indices_per_size[len(_SCREAMING_SNAKE_CASE )]]
return shuffled_kwargs
| 620 | 1 |
from __future__ import annotations
from math import pi, sqrt
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 620 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : List[str] = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __snake_case ( lowerCAmelCase__ ):
__lowerCAmelCase : Any = 'biogpt'
def __init__( self , _A=42384 , _A=1024 , _A=24 , _A=16 , _A=4096 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1024 , _A=0.0_2 , _A=1E-12 , _A=True , _A=True , _A=0.0 , _A=0.0 , _A=1 , _A=0 , _A=2 , **_A , ):
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = scale_embedding
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = layerdrop
SCREAMING_SNAKE_CASE_ = activation_dropout
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A)
| 620 | 1 |
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE )
print('The following activities are selected:' )
# The first activity is always selected
SCREAMING_SNAKE_CASE_ = 0
print(_SCREAMING_SNAKE_CASE , end=',' )
# Consider rest of the activities
for j in range(_SCREAMING_SNAKE_CASE ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(_SCREAMING_SNAKE_CASE , end=',' )
SCREAMING_SNAKE_CASE_ = j
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ : List[str] = [1, 3, 0, 5, 8, 5]
UpperCamelCase__ : Dict = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 620 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __snake_case ( lowerCAmelCase__ ):
def __init__( self , _A , _A , _A , _A = None , ):
super().__init__()
self.register_modules(transformer=_A , vae=_A , scheduler=_A)
# create a imagenet -> id dictionary for easier use
SCREAMING_SNAKE_CASE_ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(','):
SCREAMING_SNAKE_CASE_ = int(_A)
SCREAMING_SNAKE_CASE_ = dict(sorted(self.labels.items()))
def lowerCAmelCase__ ( self , _A):
if not isinstance(_A , _A):
SCREAMING_SNAKE_CASE_ = list(_A)
for l in label:
if l not in self.labels:
raise ValueError(
f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""")
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , _A , _A = 4.0 , _A = None , _A = 50 , _A = "pil" , _A = True , ):
SCREAMING_SNAKE_CASE_ = len(_A)
SCREAMING_SNAKE_CASE_ = self.transformer.config.sample_size
SCREAMING_SNAKE_CASE_ = self.transformer.config.in_channels
SCREAMING_SNAKE_CASE_ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_A , device=self.device , dtype=self.transformer.dtype , )
SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2) if guidance_scale > 1 else latents
SCREAMING_SNAKE_CASE_ = torch.tensor(_A , device=self.device).reshape(-1)
SCREAMING_SNAKE_CASE_ = torch.tensor([1000] * batch_size , device=self.device)
SCREAMING_SNAKE_CASE_ = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_A)
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale > 1:
SCREAMING_SNAKE_CASE_ = latent_model_input[: len(_A) // 2]
SCREAMING_SNAKE_CASE_ = torch.cat([half, half] , dim=0)
SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(_A , _A)
SCREAMING_SNAKE_CASE_ = t
if not torch.is_tensor(_A):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
SCREAMING_SNAKE_CASE_ = latent_model_input.device.type == 'mps'
if isinstance(_A , _A):
SCREAMING_SNAKE_CASE_ = torch.floataa if is_mps else torch.floataa
else:
SCREAMING_SNAKE_CASE_ = torch.intaa if is_mps else torch.intaa
SCREAMING_SNAKE_CASE_ = torch.tensor([timesteps] , dtype=_A , device=latent_model_input.device)
elif len(timesteps.shape) == 0:
SCREAMING_SNAKE_CASE_ = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
SCREAMING_SNAKE_CASE_ = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
SCREAMING_SNAKE_CASE_ = self.transformer(
_A , timestep=_A , class_labels=_A).sample
# perform guidance
if guidance_scale > 1:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , len(_A) // 2 , dim=0)
SCREAMING_SNAKE_CASE_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
SCREAMING_SNAKE_CASE_ = torch.cat([half_eps, half_eps] , dim=0)
SCREAMING_SNAKE_CASE_ = torch.cat([eps, rest] , dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , _A , dim=1)
else:
SCREAMING_SNAKE_CASE_ = noise_pred
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , _A , _A).prev_sample
if guidance_scale > 1:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = latent_model_input.chunk(2 , dim=0)
else:
SCREAMING_SNAKE_CASE_ = latent_model_input
SCREAMING_SNAKE_CASE_ = 1 / self.vae.config.scaling_factor * latents
SCREAMING_SNAKE_CASE_ = self.vae.decode(_A).sample
SCREAMING_SNAKE_CASE_ = (samples / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE_ = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A)
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_A)
| 620 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __snake_case ( lowerCAmelCase__ ):
def __init__( self , _A , _A , _A , _A = None , ):
super().__init__()
self.register_modules(transformer=_A , vae=_A , scheduler=_A)
# create a imagenet -> id dictionary for easier use
SCREAMING_SNAKE_CASE_ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(','):
SCREAMING_SNAKE_CASE_ = int(_A)
SCREAMING_SNAKE_CASE_ = dict(sorted(self.labels.items()))
def lowerCAmelCase__ ( self , _A):
if not isinstance(_A , _A):
SCREAMING_SNAKE_CASE_ = list(_A)
for l in label:
if l not in self.labels:
raise ValueError(
f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""")
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , _A , _A = 4.0 , _A = None , _A = 50 , _A = "pil" , _A = True , ):
SCREAMING_SNAKE_CASE_ = len(_A)
SCREAMING_SNAKE_CASE_ = self.transformer.config.sample_size
SCREAMING_SNAKE_CASE_ = self.transformer.config.in_channels
SCREAMING_SNAKE_CASE_ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_A , device=self.device , dtype=self.transformer.dtype , )
SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2) if guidance_scale > 1 else latents
SCREAMING_SNAKE_CASE_ = torch.tensor(_A , device=self.device).reshape(-1)
SCREAMING_SNAKE_CASE_ = torch.tensor([1000] * batch_size , device=self.device)
SCREAMING_SNAKE_CASE_ = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_A)
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale > 1:
SCREAMING_SNAKE_CASE_ = latent_model_input[: len(_A) // 2]
SCREAMING_SNAKE_CASE_ = torch.cat([half, half] , dim=0)
SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(_A , _A)
SCREAMING_SNAKE_CASE_ = t
if not torch.is_tensor(_A):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
SCREAMING_SNAKE_CASE_ = latent_model_input.device.type == 'mps'
if isinstance(_A , _A):
SCREAMING_SNAKE_CASE_ = torch.floataa if is_mps else torch.floataa
else:
SCREAMING_SNAKE_CASE_ = torch.intaa if is_mps else torch.intaa
SCREAMING_SNAKE_CASE_ = torch.tensor([timesteps] , dtype=_A , device=latent_model_input.device)
elif len(timesteps.shape) == 0:
SCREAMING_SNAKE_CASE_ = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
SCREAMING_SNAKE_CASE_ = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
SCREAMING_SNAKE_CASE_ = self.transformer(
_A , timestep=_A , class_labels=_A).sample
# perform guidance
if guidance_scale > 1:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , len(_A) // 2 , dim=0)
SCREAMING_SNAKE_CASE_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
SCREAMING_SNAKE_CASE_ = torch.cat([half_eps, half_eps] , dim=0)
SCREAMING_SNAKE_CASE_ = torch.cat([eps, rest] , dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , _A , dim=1)
else:
SCREAMING_SNAKE_CASE_ = noise_pred
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , _A , _A).prev_sample
if guidance_scale > 1:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = latent_model_input.chunk(2 , dim=0)
else:
SCREAMING_SNAKE_CASE_ = latent_model_input
SCREAMING_SNAKE_CASE_ = 1 / self.vae.config.scaling_factor * latents
SCREAMING_SNAKE_CASE_ = self.vae.decode(_A).sample
SCREAMING_SNAKE_CASE_ = (samples / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE_ = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A)
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_A)
| 620 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class __snake_case :
def __init__( self , _A , _A , _A , _A , _A , _A=0.2 , _A=0.2):
SCREAMING_SNAKE_CASE_ = bp_numa
SCREAMING_SNAKE_CASE_ = bp_numa
SCREAMING_SNAKE_CASE_ = bp_numa
SCREAMING_SNAKE_CASE_ = conva_get[:2]
SCREAMING_SNAKE_CASE_ = conva_get[2]
SCREAMING_SNAKE_CASE_ = size_pa
SCREAMING_SNAKE_CASE_ = rate_w
SCREAMING_SNAKE_CASE_ = rate_t
SCREAMING_SNAKE_CASE_ = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
SCREAMING_SNAKE_CASE_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
SCREAMING_SNAKE_CASE_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.conva[1]) + 1
SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.num_bpa) + 1
SCREAMING_SNAKE_CASE_ = -2 * np.random.rand(self.num_bpa) + 1
def lowerCAmelCase__ ( self , _A):
# save model dict with pickle
SCREAMING_SNAKE_CASE_ = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(_A , 'wb') as f:
pickle.dump(_A , _A)
print(f"""Model saved: {save_path}""")
@classmethod
def lowerCAmelCase__ ( cls , _A):
# read saved model
with open(_A , 'rb') as f:
SCREAMING_SNAKE_CASE_ = pickle.load(_A) # noqa: S301
SCREAMING_SNAKE_CASE_ = model_dic.get('conv1')
conv_get.append(model_dic.get('step_conv1'))
SCREAMING_SNAKE_CASE_ = model_dic.get('size_pooling1')
SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp1')
SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp2')
SCREAMING_SNAKE_CASE_ = model_dic.get('num_bp3')
SCREAMING_SNAKE_CASE_ = model_dic.get('rate_weight')
SCREAMING_SNAKE_CASE_ = model_dic.get('rate_thre')
# create model instance
SCREAMING_SNAKE_CASE_ = CNN(_A , _A , _A , _A , _A , _A , _A)
# modify model parameter
SCREAMING_SNAKE_CASE_ = model_dic.get('w_conv1')
SCREAMING_SNAKE_CASE_ = model_dic.get('wkj')
SCREAMING_SNAKE_CASE_ = model_dic.get('vji')
SCREAMING_SNAKE_CASE_ = model_dic.get('thre_conv1')
SCREAMING_SNAKE_CASE_ = model_dic.get('thre_bp2')
SCREAMING_SNAKE_CASE_ = model_dic.get('thre_bp3')
return conv_ins
def lowerCAmelCase__ ( self , _A):
return 1 / (1 + np.exp(-1 * x))
def lowerCAmelCase__ ( self , _A):
return round(_A , 3)
def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A):
# convolution process
SCREAMING_SNAKE_CASE_ = convs[0]
SCREAMING_SNAKE_CASE_ = convs[1]
SCREAMING_SNAKE_CASE_ = np.shape(_A)[0]
# get the data slice of original image data, data_focus
SCREAMING_SNAKE_CASE_ = []
for i_focus in range(0 , size_data - size_conv + 1 , _A):
for j_focus in range(0 , size_data - size_conv + 1 , _A):
SCREAMING_SNAKE_CASE_ = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_A)
# calculate the feature map of every single kernel, and saved as list of matrix
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = int((size_data - size_conv) / conv_step + 1)
for i_map in range(_A):
SCREAMING_SNAKE_CASE_ = []
for i_focus in range(len(_A)):
SCREAMING_SNAKE_CASE_ = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(_A))
SCREAMING_SNAKE_CASE_ = np.asmatrix(_A).reshape(
_A , _A)
data_featuremap.append(_A)
# expanding the data slice to One dimenssion
SCREAMING_SNAKE_CASE_ = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_A))
SCREAMING_SNAKE_CASE_ = np.asarray(_A)
return focus_list, data_featuremap
def lowerCAmelCase__ ( self , _A , _A , _A="average_pool"):
# pooling process
SCREAMING_SNAKE_CASE_ = len(featuremaps[0])
SCREAMING_SNAKE_CASE_ = int(size_map / size_pooling)
SCREAMING_SNAKE_CASE_ = []
for i_map in range(len(_A)):
SCREAMING_SNAKE_CASE_ = featuremaps[i_map]
SCREAMING_SNAKE_CASE_ = []
for i_focus in range(0 , _A , _A):
for j_focus in range(0 , _A , _A):
SCREAMING_SNAKE_CASE_ = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_A))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_A))
SCREAMING_SNAKE_CASE_ = np.asmatrix(_A).reshape(_A , _A)
featuremap_pooled.append(_A)
return featuremap_pooled
def lowerCAmelCase__ ( self , _A):
# expanding three dimension data to one dimension list
SCREAMING_SNAKE_CASE_ = []
for i in range(len(_A)):
SCREAMING_SNAKE_CASE_ = np.shape(data[i])
SCREAMING_SNAKE_CASE_ = data[i].reshape(1 , shapes[0] * shapes[1])
SCREAMING_SNAKE_CASE_ = data_listed.getA().tolist()[0]
data_expanded.extend(_A)
SCREAMING_SNAKE_CASE_ = np.asarray(_A)
return data_expanded
def lowerCAmelCase__ ( self , _A):
# expanding matrix to one dimension list
SCREAMING_SNAKE_CASE_ = np.asarray(_A)
SCREAMING_SNAKE_CASE_ = np.shape(_A)
SCREAMING_SNAKE_CASE_ = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A):
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 0
for i_map in range(_A):
SCREAMING_SNAKE_CASE_ = np.ones((size_map, size_map))
for i in range(0 , _A , _A):
for j in range(0 , _A , _A):
SCREAMING_SNAKE_CASE_ = pd_pool[
i_pool
]
SCREAMING_SNAKE_CASE_ = i_pool + 1
SCREAMING_SNAKE_CASE_ = np.multiply(
_A , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(_A)
return pd_all
def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A=bool):
# model traning
print('----------------------Start Training-------------------------')
print((' - - Shape: Train_Data ', np.shape(_A)))
print((' - - Shape: Teach_Data ', np.shape(_A)))
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 10000
while rp < n_repeat and mse >= error_accuracy:
SCREAMING_SNAKE_CASE_ = 0
print(f"""-------------Learning Time {rp}--------------""")
for p in range(len(_A)):
# print('------------Learning Image: %d--------------'%p)
SCREAMING_SNAKE_CASE_ = np.asmatrix(datas_train[p])
SCREAMING_SNAKE_CASE_ = np.asarray(datas_teach[p])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute(
_A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga)
SCREAMING_SNAKE_CASE_ = np.shape(_A)
SCREAMING_SNAKE_CASE_ = self._expand(_A)
SCREAMING_SNAKE_CASE_ = data_bp_input
SCREAMING_SNAKE_CASE_ = np.dot(_A , self.vji.T) - self.thre_bpa
SCREAMING_SNAKE_CASE_ = self.sig(_A)
SCREAMING_SNAKE_CASE_ = np.dot(_A , self.wkj.T) - self.thre_bpa
SCREAMING_SNAKE_CASE_ = self.sig(_A)
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
SCREAMING_SNAKE_CASE_ = np.multiply(
(data_teach - bp_outa) , np.multiply(_A , (1 - bp_outa)))
SCREAMING_SNAKE_CASE_ = np.multiply(
np.dot(_A , self.wkj) , np.multiply(_A , (1 - bp_outa)))
SCREAMING_SNAKE_CASE_ = np.dot(_A , self.vji)
SCREAMING_SNAKE_CASE_ = pd_i_all / (self.size_poolinga * self.size_poolinga)
SCREAMING_SNAKE_CASE_ = pd_conva_pooled.T.getA().tolist()
SCREAMING_SNAKE_CASE_ = self._calculate_gradient_from_pool(
_A , _A , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
SCREAMING_SNAKE_CASE_ = self._expand_mat(pd_conva_all[k_conv])
SCREAMING_SNAKE_CASE_ = self.rate_weight * np.dot(_A , _A)
SCREAMING_SNAKE_CASE_ = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
SCREAMING_SNAKE_CASE_ = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
SCREAMING_SNAKE_CASE_ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE_ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE_ = self.thre_bpa - pd_k_all * self.rate_thre
SCREAMING_SNAKE_CASE_ = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
SCREAMING_SNAKE_CASE_ = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
SCREAMING_SNAKE_CASE_ = rp + 1
SCREAMING_SNAKE_CASE_ = error_count / patterns
all_mse.append(_A)
def draw_error():
SCREAMING_SNAKE_CASE_ = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(_A , '+-')
plt.plot(_A , 'r--')
plt.xlabel('Learning Times')
plt.ylabel('All_mse')
plt.grid(_A , alpha=0.5)
plt.show()
print('------------------Training Complished---------------------')
print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}"""))
if draw_e:
draw_error()
return mse
def lowerCAmelCase__ ( self , _A):
# model predict
SCREAMING_SNAKE_CASE_ = []
print('-------------------Start Testing-------------------------')
print((' - - Shape: Test_Data ', np.shape(_A)))
for p in range(len(_A)):
SCREAMING_SNAKE_CASE_ = np.asmatrix(datas_test[p])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute(
_A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga)
SCREAMING_SNAKE_CASE_ = self._expand(_A)
SCREAMING_SNAKE_CASE_ = data_bp_input
SCREAMING_SNAKE_CASE_ = bp_outa * self.vji.T - self.thre_bpa
SCREAMING_SNAKE_CASE_ = self.sig(_A)
SCREAMING_SNAKE_CASE_ = bp_outa * self.wkj.T - self.thre_bpa
SCREAMING_SNAKE_CASE_ = self.sig(_A)
produce_out.extend(bp_outa.getA().tolist())
SCREAMING_SNAKE_CASE_ = [list(map(self.do_round , _A)) for each in produce_out]
return np.asarray(_A)
def lowerCAmelCase__ ( self , _A):
# return the data of image after convoluting process so we can check it out
SCREAMING_SNAKE_CASE_ = np.asmatrix(_A)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute(
_A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 620 | 1 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __snake_case ( lowerCAmelCase__ ):
def __init__( self , _A , _A=768):
super().__init__(_A)
SCREAMING_SNAKE_CASE_ = proj_size
SCREAMING_SNAKE_CASE_ = CLIPVisionModel(_A)
SCREAMING_SNAKE_CASE_ = PaintByExampleMapper(_A)
SCREAMING_SNAKE_CASE_ = nn.LayerNorm(config.hidden_size)
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , self.proj_size)
# uncondition for scaling
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.randn((1, 1, self.proj_size)))
def lowerCAmelCase__ ( self , _A , _A=False):
SCREAMING_SNAKE_CASE_ = self.model(pixel_values=_A)
SCREAMING_SNAKE_CASE_ = clip_output.pooler_output
SCREAMING_SNAKE_CASE_ = self.mapper(latent_states[:, None])
SCREAMING_SNAKE_CASE_ = self.final_layer_norm(_A)
SCREAMING_SNAKE_CASE_ = self.proj_out(_A)
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class __snake_case ( nn.Module ):
def __init__( self , _A):
super().__init__()
SCREAMING_SNAKE_CASE_ = (config.num_hidden_layers + 1) // 5
SCREAMING_SNAKE_CASE_ = config.hidden_size
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = nn.ModuleList(
[
BasicTransformerBlock(_A , _A , _A , activation_fn='gelu' , attention_bias=_A)
for _ in range(_A)
])
def lowerCAmelCase__ ( self , _A):
for block in self.blocks:
SCREAMING_SNAKE_CASE_ = block(_A)
return hidden_states
| 620 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int=7 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = None
if token is not None:
SCREAMING_SNAKE_CASE_ = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
SCREAMING_SNAKE_CASE_ = '636036'
SCREAMING_SNAKE_CASE_ = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
SCREAMING_SNAKE_CASE_ = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
return result["workflow_runs"]
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = get_daily_ci_runs(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
SCREAMING_SNAKE_CASE_ = workflow_run['id']
break
return workflow_run_id
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = get_last_daily_ci_runs(_SCREAMING_SNAKE_CASE )
if workflow_run_id is not None:
SCREAMING_SNAKE_CASE_ = get_artifacts_links(worflow_run_id=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
SCREAMING_SNAKE_CASE_ = artifacts_links[artifact_name]
download_artifact(
artifact_name=_SCREAMING_SNAKE_CASE , artifact_url=_SCREAMING_SNAKE_CASE , output_dir=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
get_last_daily_ci_artifacts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = {}
for artifact_name in artifact_names:
SCREAMING_SNAKE_CASE_ = os.path.join(_SCREAMING_SNAKE_CASE , f"""{artifact_name}.zip""" )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ = {}
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
# read the file
with z.open(_SCREAMING_SNAKE_CASE ) as f:
SCREAMING_SNAKE_CASE_ = f.read().decode('UTF-8' )
return results
| 620 | 1 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
# NOTE(review): every module-level name below is bound to ``UpperCamelCase__``,
# each assignment clobbering the previous one, while later code refers to names
# such as ``logger``, ``MAPPING_S2T`` and ``IGNORE_KEYS_S2T`` — the identifiers
# look machine-mangled; restore the real names before running this script.
UpperCamelCase__ : Dict = logging.get_logger("transformers.models.speecht5")
# fairseq -> HF key renames for the speech-encoder prenet.
UpperCamelCase__ : int = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
# fairseq -> HF key renames for the text-encoder prenet.
UpperCamelCase__ : Optional[int] = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
# fairseq -> HF key renames for the speech-decoder prenet.
UpperCamelCase__ : int = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
# fairseq -> HF key renames for the speech-decoder postnet (conv + batch-norm stack).
UpperCamelCase__ : Union[str, Any] = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
# fairseq -> HF key renames for the text-decoder prenet.
UpperCamelCase__ : Any = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
# fairseq -> HF key renames for the text-decoder postnet (LM head).
UpperCamelCase__ : Optional[Any] = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
# fairseq -> HF key renames for the shared transformer encoder (``*`` = layer index).
UpperCamelCase__ : Union[str, Any] = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
# fairseq -> HF key renames for the shared transformer decoder (``*`` = layer index).
UpperCamelCase__ : List[Any] = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
# Combined mapping for the speech-to-text task.
UpperCamelCase__ : List[str] = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
# Combined mapping for the text-to-speech task.
UpperCamelCase__ : Union[str, Any] = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
# Combined mapping for the speech-to-speech task.
UpperCamelCase__ : List[str] = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
UpperCamelCase__ : Optional[Any] = []
# Keys ignored for every task.
UpperCamelCase__ : Dict = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
# Extra keys ignored per task (prenets/postnets that the task does not use).
UpperCamelCase__ : str = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
UpperCamelCase__ : str = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
UpperCamelCase__ : Optional[Any] = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
def _UpperCAmelCase ( hf_model , key , value , full_name , weight_type ):
    """Assign ``value`` to the (sub)module of ``hf_model`` addressed by dotted ``key``.

    ``weight_type`` selects which parameter/buffer of the resolved module is
    written ("weight", "bias", "running_mean", ...); ``full_name`` is the
    original fairseq key, used only for error/log messages.

    Fix: restores the five duplicated (mangled) parameter names and the
    attribute writes — the original stored every value in a throwaway local
    instead of mutating the model.
    """
    hf_pointer = hf_model
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def _UpperCAmelCase ( name , ignore_keys ):
    """Return True when fairseq key ``name`` matches any pattern in ``ignore_keys``.

    Patterns ending in ``.*`` match by prefix, patterns containing ``.*.``
    match when both halves occur in the name, and any other pattern matches
    by plain substring.

    Fix: restores the two duplicated (mangled) parameter names that the body
    references.
    """
    for key in ignore_keys:
        if key.endswith('.*' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split('.*.' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def _UpperCAmelCase ( fairseq_dict , hf_model , task ):
    """Copy every weight from a fairseq state dict into the HF SpeechT5 model.

    ``task`` selects the key-mapping / ignore-list pair ("s2t", "t2s" or
    "s2s"); conv feature-extractor weights are delegated to
    ``load_conv_layer`` and anything unmatched is reported at the end.

    Fix: restores the duplicated (mangled) parameter names and the locals the
    body reads (``MAPPING``, ``IGNORE_KEYS``, ``feature_encoder``,
    ``is_used`` ...); also restores ``hf_model.speecht5`` — the real model
    attribute — for the mangled ``speechta``.
    """
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        # Text-to-speech has no speech-encoder prenet, hence no conv extractor.
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"""Unsupported task: {task}""" )
    for name, value in fairseq_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f"""{name} was ignored""" )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_encoder , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix , suffix = key.split('.*.' )
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the layer index from the fairseq key.
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "running_mean" in name:
                        weight_type = 'running_mean'
                    elif "running_var" in name:
                        weight_type = 'running_var'
                    elif "num_batches_tracked" in name:
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def _UpperCAmelCase ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """Load one conv feature-extractor weight identified by ``full_name``.

    ``type_id`` 0 addresses the conv weight/bias of the layer; ``type_id`` 2
    the layer norm (only for layers that actually have one); anything else is
    recorded in ``unused_weights``.

    Fix: restores the five duplicated (mangled) parameter names and the
    in-place ``.data`` assignments the original dropped into a throwaway
    local.
    """
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def _UpperCAmelCase ( task , checkpoint_path , pytorch_dump_folder_path , config_path=None , vocab_path=None , repo_id=None , ):
    """Convert a fairseq SpeechT5 checkpoint to the HF format and optionally push it.

    Parameter order matches the positional call in the ``__main__`` block:
    task, checkpoint path, dump folder, then optional config path, vocab path
    and hub repo id.

    Fix: restores the six duplicated (mangled) parameter names and the locals
    the body reads (``config``, ``model``, ``tokenizer``, ``processor`` ...) —
    the throwaway assignments to a single local could never have built the
    model.
    """
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path )
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config )
    elif task == "t2s":
        config.max_speech_positions = 1_876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config )
    elif task == "s2s":
        config.max_speech_positions = 1_876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config )
    else:
        raise ValueError(f"""Unknown task name: {task}""" )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken('<mask>' , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token} )
        tokenizer.add_tokens(['<ctc_blank>'] )
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
    processor.save_pretrained(pytorch_dump_folder_path )
    fairseq_checkpoint = torch.load(checkpoint_path )
    recursively_load_weights(fairseq_checkpoint['model'] , model , task )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    # CLI entry point: parse the conversion arguments and run the converter.
    # NOTE(review): the parser and namespace are bound to ``UpperCamelCase__``
    # but the code below reads ``parser`` / ``args`` — mangled identifiers;
    # rename before running.
    UpperCamelCase__ : Tuple = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    UpperCamelCase__ : int = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
| 620 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-module boilerplate for the MVP model: declare the import structure, add
# optional pieces guarded by availability checks, then expose everything via
# ``_LazyModule`` at runtime (with real imports under TYPE_CHECKING).
# NOTE(review): every assignment targets ``UpperCamelCase__``, yet line-final
# ``_LazyModule(...)`` reads ``_import_structure`` — mangled identifiers;
# restore ``_import_structure`` before use.
UpperCamelCase__ : Any = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast tokenizer only when the `tokenizers` backend is installed.
    UpperCamelCase__ : Optional[int] = ["MvpTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch models only when PyTorch is installed.
    UpperCamelCase__ : str = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )
else:
    import sys
    UpperCamelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 620 | 1 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class __snake_case ( nn.Module ):
    """Minimal linear -> batchnorm -> linear model used by the offload tests.

    Fixes: the mangled original bound both Linear layers to the *same*
    attribute and referenced ``nn.BatchNormad``, which does not exist in
    torch (``nn.BatchNorm1d`` does). The sibling tests in this file expect
    state-dict keys ``linear1.*`` / ``linear2.*``, so the layers are restored
    under those names.
    """

    def __init__( self):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4 , 5)

    def lowerCAmelCase__ ( self , _A):
        return self.linear2(self.batchnorm(self.linear1(_A)))

    # nn.Module dispatches __call__ to ``forward``; alias so the model is
    # callable while keeping the original (mangled) method name available.
    forward = lowerCAmelCase__
class __snake_case ( unittest.TestCase ):
    """Tests for accelerate's offload helpers (offload_state_dict, offload_weight,
    OffloadedWeightsLoader, extract_submodules_state_dict).

    NOTE(review): method bodies pass ``_A`` (undefined) where the original code
    passed locals such as ``tmp_dir`` — identifiers look machine-mangled, so
    these tests cannot run as written; restore the locals before use.
    """

    def lowerCAmelCase__ ( self):
        # offload_state_dict should write an index.json plus one .dat per tensor.
        SCREAMING_SNAKE_CASE_ = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(_A , model.state_dict())
            SCREAMING_SNAKE_CASE_ = os.path.join(_A , 'index.json')
            self.assertTrue(os.path.isfile(_A))
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                SCREAMING_SNAKE_CASE_ = os.path.join(_A , f"""{key}.dat""")
                self.assertTrue(os.path.isfile(_A))
                # TODO: add tests on the fact weights are properly loaded

    def lowerCAmelCase__ ( self):
        # Round-trip a single tensor through offload_weight/load_offloaded_weight
        # for each supported dtype.
        SCREAMING_SNAKE_CASE_ = [torch.floataa, torch.floataa, torch.bfloataa]
        for dtype in dtypes:
            SCREAMING_SNAKE_CASE_ = torch.randn(2 , 3 , dtype=_A)
            with TemporaryDirectory() as tmp_dir:
                SCREAMING_SNAKE_CASE_ = offload_weight(_A , 'weight' , _A , {})
                SCREAMING_SNAKE_CASE_ = os.path.join(_A , 'weight.dat')
                self.assertTrue(os.path.isfile(_A))
                self.assertDictEqual(_A , {'weight': {'shape': [2, 3], 'dtype': str(_A).split('.')[1]}})
                SCREAMING_SNAKE_CASE_ = load_offloaded_weight(_A , index['weight'])
                self.assertTrue(torch.equal(_A , _A))

    def lowerCAmelCase__ ( self):
        # OffloadedWeightsLoader should merge in-memory and on-disk weights,
        # preferring the in-memory copy for duplicated keys.
        SCREAMING_SNAKE_CASE_ = ModelForTest()
        SCREAMING_SNAKE_CASE_ = model.state_dict()
        SCREAMING_SNAKE_CASE_ = {k: v for k, v in state_dict.items() if 'linear2' not in k}
        SCREAMING_SNAKE_CASE_ = {k: v for k, v in state_dict.items() if 'linear2' in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(_A , _A)
            SCREAMING_SNAKE_CASE_ = OffloadedWeightsLoader(state_dict=_A , save_folder=_A)
            # Every key is there with the right value
            self.assertEqual(sorted(_A) , sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(_A , weight_map[key]))
        SCREAMING_SNAKE_CASE_ = {k: v for k, v in state_dict.items() if 'weight' in k}
        SCREAMING_SNAKE_CASE_ = {k: v for k, v in state_dict.items() if 'weight' not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(_A , _A)
            SCREAMING_SNAKE_CASE_ = OffloadedWeightsLoader(state_dict=_A , save_folder=_A)
            # Every key is there with the right value
            self.assertEqual(sorted(_A) , sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(_A , weight_map[key]))
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(_A , _A)
            # Duplicates are removed
            SCREAMING_SNAKE_CASE_ = OffloadedWeightsLoader(state_dict=_A , save_folder=_A)
            # Every key is there with the right value
            self.assertEqual(sorted(_A) , sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(_A , weight_map[key]))

    def lowerCAmelCase__ ( self):
        # extract_submodules_state_dict must match exact prefixes only
        # ('a.1' must not capture 'a.10').
        SCREAMING_SNAKE_CASE_ = {'a.1': 0, 'a.10': 1, 'a.2': 2}
        SCREAMING_SNAKE_CASE_ = extract_submodules_state_dict(_A , ['a.1', 'a.2'])
        self.assertDictEqual(_A , {'a.1': 0, 'a.2': 2})
        SCREAMING_SNAKE_CASE_ = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
        SCREAMING_SNAKE_CASE_ = extract_submodules_state_dict(_A , ['a.1', 'a.2'])
        self.assertDictEqual(_A , {'a.1.a': 0, 'a.2.a': 2})
| 620 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class __snake_case ( unittest.TestCase ):
    """Smoke tests for the ``accelerate launch`` / ``accelerate test`` CLIs.

    NOTE(review): class attributes reference ``mod_file`` / ``config_folder`` /
    ``config_file`` while the assignments all target ``__lowerCAmelCase`` —
    mangled identifiers; restore the real attribute names before running.
    """
    __lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils )
    __lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
    __lowerCAmelCase : Tuple = ['accelerate', 'launch']
    __lowerCAmelCase : Union[str, Any] = Path.home() / '.cache/huggingface/accelerate'
    __lowerCAmelCase : List[str] = 'default_config.yaml'
    __lowerCAmelCase : List[Any] = config_folder / config_file
    __lowerCAmelCase : str = config_folder / '_default_config.yaml'
    __lowerCAmelCase : Optional[int] = Path('tests/test_configs' )

    @classmethod
    def lowerCAmelCase__ ( cls):
        # Stash any pre-existing user config so the tests run from a clean slate.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def lowerCAmelCase__ ( cls):
        # Restore the user's original config after the test run.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def lowerCAmelCase__ ( self):
        # Launch with no config file; add --multi_gpu when >1 CUDA device exists.
        SCREAMING_SNAKE_CASE_ = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy())

    def lowerCAmelCase__ ( self):
        # Launch once per checked-in sample config file.
        for config in sorted(self.test_config_path.glob('**/*.yaml')):
            with self.subTest(config_file=_A):
                execute_subprocess_async(
                    self.base_cmd + ['--config_file', str(_A), self.test_file_path] , env=os.environ.copy())

    def lowerCAmelCase__ ( self):
        # `accelerate test` should exit cleanly.
        execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy())
class __snake_case ( unittest.TestCase ):
    """Tests for ``accelerate tpu-config``: each case runs the CLI in --debug
    mode and asserts the gcloud command line it would execute.

    NOTE(review): the attribute assignments all target ``__lowerCAmelCase``
    while the methods read ``cmd`` / ``command`` / ``tpu_zone`` etc. — mangled
    identifiers; restore the real attribute names before running.
    """
    __lowerCAmelCase : Optional[Any] = 'test-tpu'
    __lowerCAmelCase : str = 'us-central1-a'
    __lowerCAmelCase : Union[str, Any] = 'ls'
    __lowerCAmelCase : Union[str, Any] = ['accelerate', 'tpu-config']
    __lowerCAmelCase : Union[str, Any] = 'cd /usr/share'
    __lowerCAmelCase : List[Any] = 'tests/test_samples/test_command_file.sh'
    __lowerCAmelCase : Dict = 'Running gcloud compute tpus tpu-vm ssh'

    def lowerCAmelCase__ ( self):
        # Base case: all options given on the command line, no config file.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # Options from the CLI should override a legacy (0.12.0) config file.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command',
                self.command,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # With only a config file, commands come from the file itself.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_A)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # A --command flag replaces the commands from the config file.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # Multiple --command flags are chained with ';'.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--command',
                self.command,
                '--command',
                'echo "Hello World"',
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # --command_file supplies the commands from a shell script.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # --command_file with a legacy config plus explicit zone/name flags.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command_file',
                self.command_file,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # --install_accelerate prepends a pip install of the latest release.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )

    def lowerCAmelCase__ ( self):
        # --accelerate_version pins the exact version in the pip install.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--install_accelerate',
                '--accelerate_version',
                '12.0.0',
                '--debug',
            ] , return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )
| 620 | 1 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
UpperCamelCase__ : Dict = "src/diffusers"
# Matches is_xxx_available()
UpperCamelCase__ : List[Any] = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
UpperCamelCase__ : Dict = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
UpperCamelCase__ : List[str] = "\n{0} = None\n"
UpperCamelCase__ : List[Any] = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
UpperCamelCase__ : int = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : List[str] ):
    """Return "xxx_and_yyy" naming every backend tested on the given line, or None.

    Fix: the original checked ``len(line)`` and joined the *characters of the
    line* with "_and_"; it must check and join the regex matches instead.
    """
    backends = _re_backend.findall(_SCREAMING_SNAKE_CASE )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def _UpperCAmelCase ( ):
    """Parse diffusers' ``__init__.py`` and return ``{backend: [objects]}`` per availability guard.

    Fix: restores the locals the body reads (``lines``, ``backend``,
    ``objects`` ...) — the mangled original assigned everything to one
    throwaway name and then dereferenced the unassigned real names.
    """
    # PATH_TO_DIFFUSERS is the source root ("src/diffusers");
    # NOTE(review): the module constant above was mangled to ``UpperCamelCase__``.
    with open(os.path.join(PATH_TO_DIFFUSERS , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('else:' ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def _UpperCAmelCase ( name , backend_name ):
    """Render the dummy-object source for ``name`` guarded by ``backend_name``.

    Uppercase names are constants, lowercase names are functions, anything
    else is a class. Fix: restores the two duplicated (mangled) parameter
    names.
    """
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
def _UpperCAmelCase ( backend_specific_objects=None ):
    """Build ``{backend: dummy-file source}`` for every guarded backend.

    Fix: restores the parameter and the locals the body reads — the mangled
    original assigned every intermediate to one throwaway name.
    """
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '[' + ', '.join(f"""\"{b}\"""" for b in backend.split('_and_' ) ) + ']'
        dummy_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def _UpperCAmelCase ( overwrite=False ):
    """Check that utils/dummy_*_objects.py match the main __init__; rewrite them when ``overwrite``.

    Fix: restores the parameter and the locals the body reads — the mangled
    original referenced several names that were never assigned.
    """
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'torch': 'pt'}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , 'utils' )
    dummy_file_paths = {
        backend: os.path.join(path , f"""dummy_{short_names.get(backend , backend )}_objects.py""" )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , 'r' , encoding='utf-8' , newline='\n' ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"""Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main """
                    '__init__ has new objects.' )
                with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    'The main __init__ has objects that are not present in '
                    f"""diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` """
                    'to fix this.' )
if __name__ == "__main__":
    # CLI entry point for `make fix-copies` style checks.
    # NOTE(review): the namespace is bound to ``UpperCamelCase__`` but read back
    # as ``args``, and ``check_dummies`` is not defined under that name in this
    # mangled file — restore the identifiers before running.
    UpperCamelCase__ : List[Any] = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    UpperCamelCase__ : int = parser.parse_args()
    check_dummies(args.fix_and_overwrite)
| 620 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy-module boilerplate for TrOCR: config/processor are always exposed, the
# torch models only when PyTorch is available.
# NOTE(review): the assignments target ``UpperCamelCase__`` yet the final
# ``_LazyModule(...)`` reads ``_import_structure`` — mangled identifiers;
# restore ``_import_structure`` before use.
UpperCamelCase__ : Tuple = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : Tuple = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys
    UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 620 | 1 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

# Maps the --mode / `mode` argument to the AutoModel class used for the backbone.
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeqaSeqLM,
    "translation": AutoModelForSeqaSeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
# Used for the --lr_scheduler argparse choices/metavar.
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    """Generic PyTorch-Lightning wrapper around a Hugging Face transformers model.

    Subclasses must implement `get_dataloader` (and typically `validation_step` /
    `validation_end`, which `test_step` / `test_epoch_end` delegate to).
    """

    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize config, tokenizer and model (each may be passed in or loaded from hparams)."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        # Forward selected dropout-style hparams into the model config when set.
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"""model config doesn't have a `{p}` attribute"""
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        """Reload the underlying model from a (possibly different) pretrained checkpoint."""
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        """Build the LR scheduler dict Lightning expects, from the --lr_scheduler choice."""
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (warmup and decay); no weight decay on bias/LayerNorm."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        # Delegates to the subclass-provided validation logic.
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self):
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        # Record dataset size so total_steps() can be computed; build the train loader eagerly.
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        """Path of the cached-features file for `mode` ('train'/'dev'/'test')."""
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        """Also save model + tokenizer in HF format under output_dir/best_tfmr (rank zero only)."""
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Register model/optimization CLI arguments on `parser`."""
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(root_dir).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    """Initializes the RAG retriever on the master worker before training begins."""

    # NOTE(review): hook name assumed to be `on_sanity_check_start` — confirm against the
    # pytorch-lightning version this example targets.
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback(pl.Callback):
    """After backward, prints the names of RAG parameters that received no gradient."""

    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    """Logs learning rates per step and prints validation/test metrics."""

    def on_batch_end(self, trainer, pl_module):
        # Log the current LR of every parameter group.
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    """Register task-independent CLI arguments (paths, precision, seed, ...) on `parser`.

    `root_dir` anchors the default output/data directories next to the calling script.
    """
    parser.add_argument(
        "--output_dir",
        default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=None,
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    """Seed, build default callbacks, construct a pl.Trainer from `args` and optionally fit.

    Returns the constructed trainer. `extra_callbacks=None` means "no extra callbacks"
    (avoids the shared-mutable-default bug of `=[]`).
    """
    pl.seed_everything(args.seed)
    extra_callbacks = [] if extra_callbacks is None else list(extra_callbacks)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
| 620 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
# (oe_process declares `global process_lock` and acquires/releases it around each transfer)
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker for one element of the odd-even transposition sort.

    position    : index of this element in the array
    value       : the element's value (updated in place over the rounds)
    l_send/r_send : pipes to send our value to the left/right neighbor (None at the edges)
    lr_cv/rr_cv : pipes to receive the left/right neighbor's value
    result_pipe : pipe used to report the final value back to the main process
    """
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    # NOTE(review): 10 matches the demo list length in main(); confirm before reusing
    # with arrays of other sizes.
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sort `arr` with a parallel odd-even transposition sort (one process per element).

    Mutates and returns `arr`.
    """
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    # the pipes just given to process 0 become the left-hand pipes of process 1
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    """Demonstrate the parallel sort on the reversed list 10..1."""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
| 620 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    """Tests SamProcessor save/load, image preprocessing and (torch) mask post-processing."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list containing one random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    """Tests SamProcessor save/load, image preprocessing and (TensorFlow) mask post-processing."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list containing one random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    """Cross-framework (PT vs TF) equivalence tests for SamProcessor outputs."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list containing one random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(tf_input_feat_extract, tf_input_processor))
| 620 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
UpperCamelCase__ : int = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    """Exercises the text-question-answering tool, both local and remote, with args and kwargs."""

    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 620 | 1 |
def set_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` set to 1.

    >>> set_bit(0b1101, 1)
    15
    """
    return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` cleared to 0.

    >>> clear_bit(0b1111, 1)
    13
    """
    return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` inverted.

    >>> flip_bit(0b1101, 1)
    15
    """
    return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1.

    >>> is_bit_set(0b1010, 1)
    True
    """
    return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`.

    >>> get_bit(0b1010, 1)
    1
    """
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 620 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    """Holds the configuration used to build BeitImageProcessor instances in the tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate a BeitImageProcessor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    """Load one (image, segmentation map) pair from the ADE20k test fixtures."""
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map
def prepare_semantic_batch_inputs():
    """Load two (image, segmentation map) pairs from the ADE20k test fixtures."""
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class __snake_case ( lowerCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : Union[str, Any] = BeitImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = BeitImageProcessingTester(self)
@property
def lowerCAmelCase__ ( self):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_A , 'do_resize'))
self.assertTrue(hasattr(_A , 'size'))
self.assertTrue(hasattr(_A , 'do_center_crop'))
self.assertTrue(hasattr(_A , 'center_crop'))
self.assertTrue(hasattr(_A , 'do_normalize'))
self.assertTrue(hasattr(_A , 'image_mean'))
self.assertTrue(hasattr(_A , 'image_std'))
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 20, 'width': 20})
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18})
self.assertEqual(image_processor.do_reduce_labels , _A)
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A)
self.assertEqual(image_processor.size , {'height': 42, 'width': 42})
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84})
self.assertEqual(image_processor.do_reduce_labels , _A)
def lowerCAmelCase__ ( self):
pass
def lowerCAmelCase__ ( self):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A)
for image in image_inputs:
self.assertIsInstance(_A , Image.Image)
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCAmelCase__ ( self):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A)
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray)
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCAmelCase__ ( self):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A)
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor)
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCAmelCase__ ( self):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A)
SCREAMING_SNAKE_CASE_ = []
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor)
maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , maps[0] , return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long)
self.assertTrue(encoding['labels'].min().item() >= 0)
self.assertTrue(encoding['labels'].max().item() <= 255)
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long)
self.assertTrue(encoding['labels'].min().item() >= 0)
self.assertTrue(encoding['labels'].max().item() <= 255)
# Test not batched input (PIL images)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long)
self.assertTrue(encoding['labels'].min().item() >= 0)
self.assertTrue(encoding['labels'].max().item() <= 255)
# Test batched input (PIL images)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_batch_inputs()
SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
2,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long)
self.assertTrue(encoding['labels'].min().item() >= 0)
self.assertTrue(encoding['labels'].max().item() <= 255)
def lowerCAmelCase__ ( self):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt')
self.assertTrue(encoding['labels'].min().item() >= 0)
self.assertTrue(encoding['labels'].max().item() <= 150)
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt')
self.assertTrue(encoding['labels'].min().item() >= 0)
self.assertTrue(encoding['labels'].max().item() <= 255)
| 620 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.