| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (81 to 54k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 to 1) |
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Optional[int] = prime_factors(lowercase_ )
if is_square_free(lowercase_ ):
return -1 if len(lowercase_ ) % 2 else 1
return 0
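# Example: mobius(6) == 1, since 6 = 2 * 3 is square-free with an even number of
# prime factors, while mobius(4) == 0 because 4 = 2 * 2 is not square-free.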
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = "yolos"
def __init__( self : Dict , __a : Optional[Any]=768 , __a : List[Any]=12 , __a : Any=12 , __a : List[Any]=3072 , __a : Optional[int]="gelu" , __a : Dict=0.0 , __a : Optional[Any]=0.0 , __a : Any=0.02 , __a : Optional[int]=1e-1_2 , __a : List[Any]=[512, 864] , __a : List[str]=16 , __a : str=3 , __a : Optional[Any]=True , __a : Optional[Any]=100 , __a : List[str]=True , __a : Any=False , __a : List[str]=1 , __a : str=5 , __a : Optional[Any]=2 , __a : Tuple=5 , __a : Any=2 , __a : Union[str, Any]=0.1 , **__a : List[str] , ) -> List[str]:
super().__init__(**__a )
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : Tuple = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : Any = qkv_bias
_UpperCamelCase : str = num_detection_tokens
_UpperCamelCase : str = use_mid_position_embeddings
_UpperCamelCase : List[str] = auxiliary_loss
# Hungarian matcher
_UpperCamelCase : List[Any] = class_cost
_UpperCamelCase : int = bbox_cost
_UpperCamelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCamelCase : List[Any] = bbox_loss_coefficient
_UpperCamelCase : str = giou_loss_coefficient
_UpperCamelCase : Dict = eos_coefficient
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> float:
return 1e-4
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return 12
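# Usage sketch with the classes above: YolosConfig() yields the default (yolos-small
# style) hyperparameters, and YolosOnnxConfig describes the "pixel_values" input,
# validation tolerance, and opset needed to export such a model to ONNX.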
| 51 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
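    # setUp maps every vocab token to the same index in the shape and pronunciation
    # vocabularies, so the three id sequences above are identical; real RoCBert
    # checkpoints ship distinct shape/pronunciation mappings.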
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
| 51 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCamelCase__ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCamelCase__ = [ord(letter) for letter in string.ascii_lowercase]
lowerCamelCase__ = {ord(char) for char in VALID_CHARS}
lowerCamelCase__ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowercase__ ( lowercase_ ,lowercase_ ) -> str | None:
"""simple docstring"""
_UpperCamelCase : str = ""
_UpperCamelCase : int
_UpperCamelCase : int
_UpperCamelCase : int
for keychar, cipherchar in zip(cycle(lowercase_ ) ,lowercase_ ):
_UpperCamelCase : Dict = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowercase_ )
return decoded
def lowercase__ ( lowercase_ ) -> list[str]:
"""simple docstring"""
_UpperCamelCase : list[str] = []
for key in product(lowercase_ ,repeat=3 ):
_UpperCamelCase : int = try_key(lowercase_ ,lowercase_ )
if encoded is not None:
possibles.append(lowercase_ )
return possibles
def lowercase__ ( lowercase_ ,lowercase_ ) -> list[str]:
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def lowercase__ ( lowercase_ = "p059_cipher.txt" ) -> int:
"""simple docstring"""
_UpperCamelCase : list[int]
_UpperCamelCase : list[str]
_UpperCamelCase : str
_UpperCamelCase : str
_UpperCamelCase : str = Path(lowercase_ ).parent.joinpath(lowercase_ ).read_text(encoding="utf-8" )
_UpperCamelCase : Optional[Any] = [int(lowercase_ ) for number in data.strip().split("," )]
_UpperCamelCase : List[str] = filter_valid_chars(lowercase_ )
for common_word in COMMON_WORDS:
_UpperCamelCase : Union[str, Any] = filter_common_word(lowercase_ ,lowercase_ )
if len(lowercase_ ) == 1:
break
_UpperCamelCase : Union[str, Any] = possibles[0]
return sum(ord(lowercase_ ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
lowerCamelCase__ = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51 |
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedily select and print a maximum number of mutually non-overlapping
    activities, assuming activities are sorted by finish time."""
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
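# Example: with start = [1, 3, 0, 5, 8, 5] and finish = [2, 4, 6, 7, 9, 9] (sorted by
# finish time), the greedy scan selects activities 0, 1, 3 and 4.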
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = [1, 3, 0, 5, 8, 5]
lowerCamelCase__ = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 51 | 1 |
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
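# All four full-loop tests follow the same pattern: scale the model input, predict,
# step the scheduler, then compare summary statistics of the final sample against
# reference values recorded for seed 0.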
| 51 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding method, holding the decoded sample tensor."""

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", double_z=True):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", norm_type="group"):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default" if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    """Discrete codebook lookup layer (VQ-VAE style) with optional index remapping."""

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape
    def remap_to_used(self, inds):
        # Map full-codebook indices to the restricted index set given by `self.used`.
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)
    def unmap_to_all(self, inds):
        # Inverse of remap_to_used: map restricted indices back to the full codebook.
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)
    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
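    # The straight-through estimator above (z + (z_q - z).detach()) makes the
    # non-differentiable nearest-neighbour lookup transparent to backprop, so encoder
    # gradients are copied through the quantization step.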
    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        # `parameters` holds the mean and log-variance concatenated along dim 1.
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
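# For other=None, kl() uses the closed form against a standard normal,
# 0.5 * sum(mean^2 + var - 1 - logvar), summed over the non-batch dimensions.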
| 51 | 1 |
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
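    # evaluate() and predict() differ mainly in the dataloader they build, the metric
    # key prefix, and that predict() returns a PredictionOutput rather than a dict.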
| 51 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is kept in asdict output so it survives JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 51 | 1 |
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 51 |
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = set()
# edges = list of graph's edges
_UpperCamelCase : Union[str, Any] = get_edges(lowercase_ )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
_UpperCamelCase, _UpperCamelCase : str = edges.pop()
chosen_vertices.add(lowercase_ )
chosen_vertices.add(lowercase_ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(lowercase_ )
return chosen_vertices
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : List[str] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
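# Classic matching-based 2-approximation: any vertex cover must contain at least one
# endpoint of every popped edge, and this heuristic adds both.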
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 51 | 1 |
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
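    # Concatenating negative and positive embeddings lets a single transformer
    # forward pass produce both halves needed for classifier-free guidance.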
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1, indices.argsort(1))
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
        return rv
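# A minimal usage sketch (assuming this class is the VQ-Diffusion pipeline and that the
# checkpoint id below is available; neither is stated in this file):
#
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool", truncation_rate=0.86).images[0]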
| 51
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["OwlViTFeatureExtractor"]
lowerCamelCase__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
| 1
|
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    """simple docstring"""
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }
def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    """simple docstring"""
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
| 51
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """simple docstring"""
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= 10)
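# Worked example: an outer square of width 3 with a 1x1 hole uses 3 * 3 - 1 * 1 = 8 tiles,
# so t = 8 contributes one lamina arrangement; the answer counts the t values realised by
# between 1 and 10 distinct arrangements.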
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "vit_mae"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 51
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("KEY")
lowerCamelCase__ = TypeVar("VAL")
@dataclass(frozen=_UpperCamelCase , slots=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( Generic[KEY, VAL] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :KEY
SCREAMING_SNAKE_CASE__ :VAL
class __SCREAMING_SNAKE_CASE ( _Item ):
'''simple docstring'''
def __init__( self : List[str] ) -> None:
super().__init__(__a , __a )
def __bool__( self : Dict ) -> bool:
return False
_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    '''simple docstring'''
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)
    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)
    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False
    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)
    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit
    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)
    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)
    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)
    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)
    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break
    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)
    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)
    def __len__(self) -> int:
        return self._len
    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)
    def __repr__(self) -> str:
        val_string = " ,".join(
            f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
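# A minimal usage sketch (hypothetical, not part of the original module):
#
#   hm = HashMap()
#   hm["one"] = 1
#   assert hm["one"] == 1 and len(hm) == 1
#   del hm["one"]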
| 51
| 1
|
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
lowerCamelCase__ = re.compile(R"\s+")
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(lowercase_ ,"" ,example["content"] ).encode("utf-8" ) ).hexdigest()}
def line_stats(example):
    """simple docstring"""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
    """simple docstring"""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def is_autogenerated(example, scan_width=5):
    """simple docstring"""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """simple docstring"""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """simple docstring"""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """simple docstring"""
    lines = example["content"].splitlines()
    counter = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def char_token_ratio(example):
    """simple docstring"""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
return {"ratio": ratio}
def preprocess(example):
    """simple docstring"""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):
    """simple docstring"""
    if not check_uniques(example, uniques):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path):
    """simple docstring"""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
lowerCamelCase__ = set(ds.unique("hash"))
lowerCamelCase__ = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(f"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
lowerCamelCase__ = output_dir / "data"
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
| 51
|
"""simple docstring"""
class PrefixSum:
    '''simple docstring'''
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum_of_basis(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
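# Example (a sketch): for array [1, 2, 3] the prefix sums are [1, 3, 6], so get_sum(1, 2)
# returns 6 - 1 == 5, and contains_sum_of_basis(5) is True because the subarray [2, 3] sums to 5.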
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
| 51
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
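# For example, a single frame image becomes [[image]], a flat list of frames becomes
# [frames], and an already-batched list of lists is returned unchanged.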
class VivitImageProcessor(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 51
|
"""simple docstring"""
import math
class SelfOrganizingMap:
    '''simple docstring'''
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        da = 0.0
        db = 0.0
        for i in range(len(sample)):
            da += math.pow((sample[i] - weights[0][i]), 2)
            db += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if da > db else 1
    def update(self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float) -> list[list[int | float]]:
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
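# get_winner picks a cluster index from the sample's squared Euclidean distances to the two
# weight rows; update then pulls row j of the weights towards the sample by a factor of alpha.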
def main() -> None:
    """simple docstring"""
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
| 51
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """simple docstring"""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """simple docstring"""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
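# The fused `in_proj_weight` has shape (3 * hidden_size, hidden_size); the row-wise slices
# above yield the separate q/k/v projection weights expected by the transformers attention layers.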
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    """simple docstring"""
    if checkpoint == "small":
        # default config values
        hidden_size = 1_024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1_536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2_048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, )
    return config
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_="cpu" ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : str = MusicGen.get_pretrained(lowercase_ ,device=lowercase_ )
_UpperCamelCase : Union[str, Any] = decoder_config_from_checkpoint(lowercase_ )
_UpperCamelCase : Optional[int] = fairseq_model.lm.state_dict()
_UpperCamelCase, _UpperCamelCase : Optional[Any] = rename_state_dict(
lowercase_ ,hidden_size=decoder_config.hidden_size )
_UpperCamelCase : Tuple = TaEncoderModel.from_pretrained("t5-base" )
_UpperCamelCase : Union[str, Any] = EncodecModel.from_pretrained("facebook/encodec_32khz" )
_UpperCamelCase : str = MusicgenForCausalLM(lowercase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_UpperCamelCase, _UpperCamelCase : str = decoder.load_state_dict(lowercase_ ,strict=lowercase_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(lowercase_ ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
_UpperCamelCase : str = MusicgenForConditionalGeneration(text_encoder=lowercase_ ,audio_encoder=lowercase_ ,decoder=lowercase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase_ )
# check we can do a forward pass
_UpperCamelCase : List[str] = torch.arange(0 ,8 ,dtype=torch.long ).reshape(2 ,-1 )
_UpperCamelCase : Dict = input_ids.reshape(2 * 4 ,-1 )
with torch.no_grad():
_UpperCamelCase : Tuple = model(input_ids=lowercase_ ,decoder_input_ids=lowercase_ ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
_UpperCamelCase : int = AutoTokenizer.from_pretrained("t5-base" )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" ,padding_side="left" )
_UpperCamelCase : Optional[int] = MusicgenProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
# set the appropriate bos/pad token ids
_UpperCamelCase : str = 2_048
_UpperCamelCase : str = 2_048
# set other default generation config params
_UpperCamelCase : Optional[Any] = int(30 * audio_encoder.config.frame_rate )
_UpperCamelCase : List[str] = True
_UpperCamelCase : int = 3.0
if pytorch_dump_folder is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(lowercase_ )
processor.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowerCamelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 51
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase__ = "src/transformers"
lowerCamelCase__ = "docs/source/en"
lowerCamelCase__ = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """simple docstring"""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
lowerCamelCase__ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase__ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
lowerCamelCase__ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """simple docstring"""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    """simple docstring"""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """simple docstring"""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2
    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """simple docstring"""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"), start_prompt="<!--This table is updated automatically from the auto modules", end_prompt="<!-- End table-->", )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.")
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCamelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 51
| 1
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """simple docstring"""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
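# The resulting dict maps every byte value 0-255 to a printable unicode character: visible
# bytes map to themselves, and the remaining bytes are shifted to code points 256 and above,
# so byte-level BPE never has to special-case whitespace or control bytes.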
def get_pairs(word):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
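# For example, get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}.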
class LEDTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8"))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
| 51
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, float]:
"""simple docstring"""
_UpperCamelCase : str = len([g for position, g in enumerate(lowercase_ ) if g == main_target[position]] )
return (item, float(lowercase_ ))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, str]:
"""simple docstring"""
_UpperCamelCase : Tuple = random.randint(0 ,len(lowercase_ ) - 1 )
_UpperCamelCase : Dict = parent_a[:random_slice] + parent_a[random_slice:]
_UpperCamelCase : Tuple = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
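# Illustrative sketch of the single-point crossover above, using an assumed
# slice index of 3 instead of a random one:
def _crossover_sketch() -> None:
    parent_a, parent_b = "ABCDEF", "uvwxyz"
    slice_idx = 3
    child_a = parent_a[:slice_idx] + parent_b[slice_idx:]
    child_b = parent_b[:slice_idx] + parent_a[slice_idx:]
    assert (child_a, child_b) == ("ABCxyz", "uvwDEF")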
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : int = list(lowercase_ )
if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
_UpperCamelCase : int = random.choice(lowercase_ )
return "".join(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,) -> list[str]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
# Generate more children proportionally to the fitness score.
_UpperCamelCase : List[str] = int(parent_a[1] * 100 ) + 1
_UpperCamelCase : Union[str, Any] = 10 if child_n >= 10 else child_n
for _ in range(lowercase_ ):
_UpperCamelCase : Dict = population_score[random.randint(0 ,lowercase_ )][0]
_UpperCamelCase, _UpperCamelCase : Dict = crossover(parent_a[0] ,lowercase_ )
# Append new string to the population list.
pop.append(mutate(lowercase_ ,lowercase_ ) )
pop.append(mutate(lowercase_ ,lowercase_ ) )
return pop
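# Illustrative sketch of the fitness-proportional child count used above: a
# parent with an assumed normalised score of 0.07 spawns int(0.07 * 100) + 1 = 8
# children, and the count is capped at 10.
def _child_count_sketch() -> None:
    score = 0.07
    child_n = int(score * 100) + 1
    assert min(child_n, 10) == 8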
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
_UpperCamelCase : List[str] = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(lowercase_ )
# Verify that the target contains no genes besides the ones inside the genes variable.
_UpperCamelCase : int = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_UpperCamelCase : int = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(lowercase_ )
# Generate random starting population.
_UpperCamelCase : Union[str, Any] = []
for _ in range(lowercase_ ):
population.append("".join([random.choice(lowercase_ ) for i in range(len(lowercase_ ) )] ) )
# Just some logs to know what the algorithm is doing.
_UpperCamelCase, _UpperCamelCase : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowercase_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_UpperCamelCase : int = [evaluate(lowercase_ ,lowercase_ ) for item in population]
# Check if there is a matching evolution.
_UpperCamelCase : Optional[Any] = sorted(lowercase_ ,key=lambda lowercase_ : lowercase_[1] ,reverse=lowercase_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping these avoids regression of the evolution.
_UpperCamelCase : str = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowercase_ )
# Normalize population score to be between 0 and 1.
_UpperCamelCase : str = [
(item, score / len(lowercase_ )) for item, score in population_score
]
# This is the selection step.
for i in range(lowercase_ ):
population.extend(select(population_score[int(lowercase_ )] ,lowercase_ ,lowercase_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# far fewer generations.
if len(lowercase_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 51
| 1
|
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCamelCase__ = get_tests_dir("fixtures")
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
# A mock response for an HTTP head request to emulate server down
_UpperCamelCase : List[Any] = mock.Mock()
_UpperCamelCase : Any = 500
_UpperCamelCase : Dict = {}
_UpperCamelCase : List[Any] = HTTPError
_UpperCamelCase : Optional[int] = {}
# Download this model to make sure it's in the cache.
_UpperCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__a ) as mock_head:
_UpperCamelCase : Any = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# This check ensures we did call the fake head request
mock_head.assert_called()
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
# This test is for deprecated behavior and can be removed in v5
_UpperCamelCase : Any = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Any ) -> Dict:
_UpperCamelCase : List[str] = TOKEN
HfFolder.save_token(__a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[int] ) -> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = WavaVecaFeatureExtractor.from_pretrained(__a )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
_UpperCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__a , getattr(__a , __a ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__a , repo_id="test-feature-extractor" , push_to_hub=__a , use_auth_token=self._token )
_UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__a , getattr(__a , __a ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
_UpperCamelCase : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(__a )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
_UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__a , getattr(__a , __a ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__a , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=__a , use_auth_token=self._token )
_UpperCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__a , getattr(__a , __a ) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
CustomFeatureExtractor.register_for_auto_class()
_UpperCamelCase : Any = CustomFeatureExtractor.from_pretrained(__a )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
_UpperCamelCase : List[str] = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=__a )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 51
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = ["model.decoder.embed_positions.weights"]
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
if "emb" in name:
_UpperCamelCase : List[str] = name.replace("emb" ,"model.decoder.embed_tokens" )
if "transformer" in name:
_UpperCamelCase : Optional[int] = name.replace("transformer" ,"model.decoder" )
if "cross_attention" in name:
_UpperCamelCase : Optional[int] = name.replace("cross_attention" ,"encoder_attn" )
if "linear1" in name:
_UpperCamelCase : Optional[Any] = name.replace("linear1" ,"fc1" )
if "linear2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("linear2" ,"fc2" )
if "norm1" in name:
_UpperCamelCase : Optional[Any] = name.replace("norm1" ,"self_attn_layer_norm" )
if "norm_cross" in name:
_UpperCamelCase : Dict = name.replace("norm_cross" ,"encoder_attn_layer_norm" )
if "norm2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("norm2" ,"final_layer_norm" )
if "out_norm" in name:
_UpperCamelCase : Union[str, Any] = name.replace("out_norm" ,"model.decoder.layer_norm" )
if "linears" in name:
_UpperCamelCase : List[str] = name.replace("linears" ,"lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
_UpperCamelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" ,"enc_to_dec_proj" )
return name
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple[Dict, Dict]:
"""simple docstring"""
_UpperCamelCase : str = list(state_dict.keys() )
_UpperCamelCase : Optional[Any] = {}
for key in keys:
_UpperCamelCase : Optional[int] = state_dict.pop(lowercase_ )
_UpperCamelCase : List[Any] = rename_keys(lowercase_ )
if "in_proj_weight" in key:
# split fused qkv proj
_UpperCamelCase : Tuple = val[:hidden_size, :]
_UpperCamelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
_UpperCamelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_UpperCamelCase : Optional[Any] = val
else:
_UpperCamelCase : List[str] = val
return state_dict, enc_dec_proj_state_dict
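# Illustrative sketch of the fused-qkv split performed above: an
# `in_proj_weight` of shape (3 * hidden_size, hidden_size) is cut into equal
# query/key/value blocks (the hidden size here is an assumed toy value).
def _split_qkv_sketch() -> None:
    import torch

    hidden_size = 4
    fused = torch.randn(3 * hidden_size, hidden_size)
    q = fused[:hidden_size, :]
    k = fused[hidden_size : 2 * hidden_size, :]
    v = fused[-hidden_size:, :]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)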
def lowercase__ ( lowercase_ ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
_UpperCamelCase : List[Any] = 1_024
_UpperCamelCase : List[str] = 24
_UpperCamelCase : Any = 16
elif checkpoint == "medium":
_UpperCamelCase : Tuple = 1_536
_UpperCamelCase : Dict = 48
_UpperCamelCase : Tuple = 24
elif checkpoint == "large":
_UpperCamelCase : int = 2_048
_UpperCamelCase : Optional[int] = 48
_UpperCamelCase : Dict = 32
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
_UpperCamelCase : str = MusicgenDecoderConfig(
hidden_size=lowercase_ ,ffn_dim=hidden_size * 4 ,num_hidden_layers=lowercase_ ,num_attention_heads=lowercase_ ,)
return config
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_="cpu" ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : str = MusicGen.get_pretrained(lowercase_ ,device=lowercase_ )
_UpperCamelCase : Union[str, Any] = decoder_config_from_checkpoint(lowercase_ )
_UpperCamelCase : Optional[int] = fairseq_model.lm.state_dict()
_UpperCamelCase, _UpperCamelCase : Optional[Any] = rename_state_dict(
lowercase_ ,hidden_size=decoder_config.hidden_size )
_UpperCamelCase : Tuple = TaEncoderModel.from_pretrained("t5-base" )
_UpperCamelCase : Union[str, Any] = EncodecModel.from_pretrained("facebook/encodec_32khz" )
_UpperCamelCase : str = MusicgenForCausalLM(lowercase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_UpperCamelCase, _UpperCamelCase : str = decoder.load_state_dict(lowercase_ ,strict=lowercase_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(lowercase_ ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
_UpperCamelCase : str = MusicgenForConditionalGeneration(text_encoder=lowercase_ ,audio_encoder=lowercase_ ,decoder=lowercase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase_ )
# check we can do a forward pass
_UpperCamelCase : List[str] = torch.arange(0 ,8 ,dtype=torch.long ).reshape(2 ,-1 )
_UpperCamelCase : Dict = input_ids.reshape(2 * 4 ,-1 )
with torch.no_grad():
_UpperCamelCase : Tuple = model(input_ids=lowercase_ ,decoder_input_ids=lowercase_ ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
_UpperCamelCase : int = AutoTokenizer.from_pretrained("t5-base" )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" ,padding_side="left" )
_UpperCamelCase : Optional[int] = MusicgenProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
# set the appropriate bos/pad token ids
_UpperCamelCase : str = 2_048
_UpperCamelCase : str = 2_048
# set other default generation config params
_UpperCamelCase : Optional[Any] = int(30 * audio_encoder.config.frame_rate )
_UpperCamelCase : List[str] = True
_UpperCamelCase : int = 3.0
if pytorch_dump_folder is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(lowercase_ )
processor.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowerCamelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 51
| 1
|
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> float:
"""simple docstring"""
if days_between_payments <= 0:
raise ValueError("days_between_payments must be > 0" )
if daily_interest_rate < 0:
raise ValueError("daily_interest_rate must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return principal * daily_interest_rate * days_between_payments
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,) -> float:
"""simple docstring"""
if number_of_compounding_periods <= 0:
raise ValueError("number_of_compounding_periods must be > 0" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,) -> float:
"""simple docstring"""
if number_of_years <= 0:
raise ValueError("number_of_years must be > 0" )
if nominal_annual_percentage_rate < 0:
raise ValueError("nominal_annual_percentage_rate must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return compound_interest(
lowercase_ ,nominal_annual_percentage_rate / 365 ,number_of_years * 365 )
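# Minimal worked example of the formulas above (all values assumed): simple
# interest on a 10,000 principal at a 0.0005 daily rate over 60 days is
# 10_000 * 0.0005 * 60 = 300, and daily compounding over the same period
# yields slightly more.
def _interest_sketch() -> None:
    principal, daily_rate, days = 10_000.0, 0.0005, 60
    simple = principal * daily_rate * days
    compound = principal * ((1 + daily_rate) ** days - 1)
    assert simple == 300.0
    assert compound > simple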
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51
|
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
lowerCamelCase__ = input("Enter image url: ").strip()
print(f"""Downloading image from {url} ...""")
lowerCamelCase__ = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase__ = soup.find("meta", {"property": "og:image"})["content"]
lowerCamelCase__ = requests.get(image_url).content
lowerCamelCase__ = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
| 51
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :torch.FloatTensor
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , __a : int = 3 , __a : int = 3 , __a : Tuple[str] = ("DownEncoderBlock2D",) , __a : Tuple[str] = ("UpDecoderBlock2D",) , __a : Tuple[int] = (64,) , __a : int = 1 , __a : str = "silu" , __a : int = 3 , __a : int = 32 , __a : int = 256 , __a : int = 32 , __a : Optional[int] = None , __a : float = 0.1_82_15 , __a : str = "group" , ) -> Optional[int]:
super().__init__()
# pass init params to Encoder
_UpperCamelCase : List[str] = Encoder(
in_channels=__a , out_channels=__a , down_block_types=__a , block_out_channels=__a , layers_per_block=__a , act_fn=__a , norm_num_groups=__a , double_z=__a , )
_UpperCamelCase : Tuple = vq_embed_dim if vq_embed_dim is not None else latent_channels
_UpperCamelCase : int = nn.Convad(__a , __a , 1 )
_UpperCamelCase : List[str] = VectorQuantizer(__a , __a , beta=0.25 , remap=__a , sane_index_shape=__a )
_UpperCamelCase : List[str] = nn.Convad(__a , __a , 1 )
# pass init params to Decoder
_UpperCamelCase : Optional[Any] = Decoder(
in_channels=__a , out_channels=__a , up_block_types=__a , block_out_channels=__a , layers_per_block=__a , act_fn=__a , norm_num_groups=__a , norm_type=__a , )
@apply_forward_hook
def __SCREAMING_SNAKE_CASE ( self : str , __a : torch.FloatTensor , __a : bool = True ) -> VQEncoderOutput:
_UpperCamelCase : Optional[int] = self.encoder(__a )
_UpperCamelCase : Tuple = self.quant_conv(__a )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__a )
@apply_forward_hook
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : torch.FloatTensor , __a : bool = False , __a : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Any = self.quantize(__a )
else:
_UpperCamelCase : str = h
_UpperCamelCase : Optional[Any] = self.post_quant_conv(__a )
_UpperCamelCase : int = self.decoder(__a , quant if self.config.norm_type == "spatial" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__a )
def __SCREAMING_SNAKE_CASE ( self : int , __a : torch.FloatTensor , __a : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
_UpperCamelCase : Tuple = sample
_UpperCamelCase : int = self.encode(__a ).latents
_UpperCamelCase : int = self.decode(__a ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__a )
| 51
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Any = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
F'''{test_file} instead.''' )
_UpperCamelCase : str = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
_UpperCamelCase : Dict = components[:-1] + [test_fn.replace(".py" ,"" )]
_UpperCamelCase : List[str] = ".".join(lowercase_ )
return test_module_path
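# Illustrative sketch of the path -> module-name conversion implemented above
# (the example path is assumed):
def _module_path_sketch() -> None:
    parts = "tests/models/bert/test_modeling_bert.py".split("/")
    module = ".".join(parts[:-1] + [parts[-1].replace(".py", "")])
    assert module == "tests.models.bert.test_modeling_bert"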
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = get_module_path(lowercase_ )
_UpperCamelCase : str = importlib.import_module(lowercase_ )
return test_module
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : List[Any] = get_test_module(lowercase_ )
for attr in dir(lowercase_ ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(lowercase_ ,lowercase_ ) )
# sort by class name
return sorted(lowercase_ ,key=lambda lowercase_ : lowercase_.__name__ )
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : Any = get_test_module(lowercase_ )
for attr in dir(lowercase_ ):
_UpperCamelCase : int = getattr(lowercase_ ,lowercase_ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
_UpperCamelCase : Optional[Any] = getattr(lowercase_ ,"all_model_classes" ,[] )
if len(lowercase_ ) > 0:
test_classes.append(lowercase_ )
# sort by class name
return sorted(lowercase_ ,key=lambda lowercase_ : lowercase_.__name__ )
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Dict = get_test_classes(lowercase_ )
_UpperCamelCase : int = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort by class name
return sorted(lowercase_ ,key=lambda lowercase_ : lowercase_.__name__ )
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = test_class()
if hasattr(lowercase_ ,"setUp" ):
test.setUp()
_UpperCamelCase : Tuple = None
if hasattr(lowercase_ ,"model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
_UpperCamelCase : Tuple = test.model_tester.__class__
return model_tester
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = get_test_classes(lowercase_ )
_UpperCamelCase : Dict = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(lowercase_ )
# sort by class name
return sorted(lowercase_ ,key=lambda lowercase_ : lowercase_.__name__ )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Any = get_test_classes_for_model(lowercase_ ,lowercase_ )
_UpperCamelCase : List[Any] = []
for test_class in test_classes:
_UpperCamelCase : List[Any] = get_model_tester_from_test_class(lowercase_ )
if tester_class is not None:
tester_classes.append(lowercase_ )
# sort by class name
return sorted(lowercase_ ,key=lambda lowercase_ : lowercase_.__name__ )
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Any = get_test_classes(lowercase_ )
_UpperCamelCase : Tuple = {test_class: get_model_tester_from_test_class(lowercase_ ) for test_class in test_classes}
return test_tester_mapping
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_model_classes(lowercase_ )
_UpperCamelCase : Optional[int] = {
model_class: get_test_classes_for_model(lowercase_ ,lowercase_ ) for model_class in model_classes
}
return model_test_mapping
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = get_model_classes(lowercase_ )
_UpperCamelCase : Tuple = {
model_class: get_tester_classes_for_model(lowercase_ ,lowercase_ ) for model_class in model_classes
}
return model_to_tester_mapping
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
if isinstance(lowercase_ ,lowercase_ ):
return o
elif isinstance(lowercase_ ,lowercase_ ):
return o.__name__
elif isinstance(lowercase_ ,(list, tuple) ):
return [to_json(x ) for x in o]
elif isinstance(lowercase_ ,lowercase_ ):
return {to_json(k ): to_json(v ) for k, v in o.items()}
else:
return o
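# Illustrative sketch of the recursive serialisation above: classes become
# their names and containers are mapped element-wise (the `Foo` class is a
# stand-in used only for this example).
def _to_json_sketch() -> None:
    class Foo:
        pass

    def to_json_local(o):
        if isinstance(o, type):
            return o.__name__
        if isinstance(o, (list, tuple)):
            return [to_json_local(x) for x in o]
        if isinstance(o, dict):
            return {to_json_local(k): to_json_local(v) for k, v in o.items()}
        return o

    assert to_json_local({Foo: [Foo, 1]}) == {"Foo": ["Foo", 1]}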
| 51
| 1
|
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : List[str] = psutil.Process()
_UpperCamelCase : Tuple = False
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : Tuple = -1
while True:
_UpperCamelCase : int = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : Any = True
_UpperCamelCase : Union[str, Any] = threading.Thread(target=self.peak_monitor )
_UpperCamelCase : Dict = True
self.thread.start()
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : str = False
self.thread.join()
return self.cpu_memory_peak
lowerCamelCase__ = PeakCPUMemory()
def lowercase__ ( ) -> Any:
"""simple docstring"""
_UpperCamelCase : List[str] = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
_UpperCamelCase : Optional[int] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
_UpperCamelCase : Any = torch.cuda.memory_allocated(lowercase_ )
torch.cuda.reset_peak_memory_stats()
return measures
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
_UpperCamelCase : List[Any] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
_UpperCamelCase : Tuple = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
_UpperCamelCase : List[str] = (torch.cuda.memory_allocated(lowercase_ ) - start_measures[str(lowercase_ )]) / 2**20
_UpperCamelCase : List[str] = (torch.cuda.max_memory_allocated(lowercase_ ) - start_measures[str(lowercase_ )]) / 2**20
return measures
def lowercase__ ( lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
print(F'''{description}:''' )
print(F'''- Time: {measures['time']:.2f}s''' )
for i in range(torch.cuda.device_count() ):
print(F'''- GPU {i} allocated: {measures[str(lowercase_ )]:.2f}MiB''' )
_UpperCamelCase : Union[str, Any] = measures[F'''{i}-peak''']
print(F'''- GPU {i} peak: {peak:.2f}MiB''' )
print(F'''- CPU RAM allocated: {measures['cpu']:.2f}MiB''' )
print(F'''- CPU RAM peak: {measures['cpu-peak']:.2f}MiB''' )
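# Minimal usage sketch (function names above are all emitted as `lowercase__`
# in this dump, so this is self-contained): measure resident CPU memory in
# MiB the same way the trackers above do.
def _cpu_rss_sketch() -> None:
    import psutil

    rss_mib = psutil.Process().memory_info().rss / 2**20
    print(f"RSS: {rss_mib:.2f}MiB")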
| 51
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
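# Illustrative sketch of the deferred-import idea behind `_LazyModule`
# (simplified, PEP 562 style; the import map below is assumed): a name is
# resolved from its source module only on first access.
def _lazy_resolve_sketch() -> None:
    import importlib

    import_map = {"sqrt": "math"}

    def resolve(name: str):
        return getattr(importlib.import_module(import_map[name]), name)

    assert resolve("sqrt")(9) == 3.0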
| 51
| 1
|
"""simple docstring"""
import enum
import shutil
import sys
lowerCamelCase__ , lowerCamelCase__ = shutil.get_terminal_size()
lowerCamelCase__ = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
class __SCREAMING_SNAKE_CASE ( enum.Enum ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = 0
SCREAMING_SNAKE_CASE__ :Tuple = 1
def lowercase__ ( lowercase_ ,lowercase_="" ) -> Union[str, Any]:
"""simple docstring"""
sys.stdout.write(str(lowercase_ ) + end )
sys.stdout.flush()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_="" ) -> Optional[int]:
"""simple docstring"""
forceWrite(F'''\u001b[{color}m{content}\u001b[0m''' ,lowercase_ )
def lowercase__ ( ) -> Optional[int]:
"""simple docstring"""
forceWrite("\r" )
def lowercase__ ( lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
forceWrite(F'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''' )
def lowercase__ ( ) -> Dict:
"""simple docstring"""
forceWrite(" " * TERMINAL_WIDTH )
reset_cursor()
def lowercase__ ( ) -> List[Any]:
"""simple docstring"""
reset_cursor()
forceWrite("-" * TERMINAL_WIDTH )
| 51
|
"""simple docstring"""
lowerCamelCase__ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 51
| 1
|
"""simple docstring"""
# Using DFS to find an Eulerian path traversal.
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Tuple = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
_UpperCamelCase, _UpperCamelCase : Any = True, True
_UpperCamelCase : Optional[int] = dfs(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
return path
def lowercase__ ( lowercase_ ,lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Tuple = 0
_UpperCamelCase : Tuple = -1
for i in range(lowercase_ ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
_UpperCamelCase : str = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
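# Illustrative sketch of the odd-degree count that drives the Euler check
# above (toy undirected adjacency list, assumed):
def _odd_degree_sketch() -> None:
    graph = {1: [2, 3], 2: [1], 3: [1]}
    odd = [u for u, adj in graph.items() if len(adj) % 2 == 1]
    # two odd-degree vertices -> an Euler path exists, but not an Euler cycle
    assert odd == [2, 3]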
def lowercase__ ( lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : List[str] = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
_UpperCamelCase, _UpperCamelCase : Any = check_circuit_or_path(lowercase_ ,lowercase_ )
if check == 3:
print("graph is not Eulerian" )
print("no path" )
return
_UpperCamelCase : int = 1
if check == 2:
_UpperCamelCase : Optional[Any] = odd_node
print("graph has a Euler path" )
if check == 1:
print("graph has a Euler cycle" )
_UpperCamelCase : Any = dfs(lowercase_ ,lowercase_ ,lowercase_ )
print(lowercase_ )
def lowercase__ ( ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
_UpperCamelCase : Any = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
_UpperCamelCase : List[Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
_UpperCamelCase : Optional[int] = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
_UpperCamelCase : Tuple = {
1: [],
2: []
# all degrees are zero
}
_UpperCamelCase : Optional[int] = 10
check_euler(lowercase_ ,lowercase_ )
check_euler(lowercase_ ,lowercase_ )
check_euler(lowercase_ ,lowercase_ )
check_euler(lowercase_ ,lowercase_ )
check_euler(lowercase_ ,lowercase_ )
if __name__ == "__main__":
main()
| 51
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
_UpperCamelCase : Tuple = tempfile.mkdtemp()
_UpperCamelCase : str = 5
# Realm tok
_UpperCamelCase : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
_UpperCamelCase : Optional[Any] = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase : Optional[Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
_UpperCamelCase : Any = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase : int = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
] , dtype=__a , )
return block_records
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
_UpperCamelCase : List[str] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
_UpperCamelCase : Tuple = self.get_config()
_UpperCamelCase : int = self.get_dummy_retriever()
_UpperCamelCase : Tuple = retriever.tokenizer
_UpperCamelCase : List[str] = np.array([0, 3] , dtype="long" )
_UpperCamelCase : Union[str, Any] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : List[str] = tokenizer(
["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : str = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase : Any = self.get_config()
_UpperCamelCase : Dict = self.get_dummy_retriever()
_UpperCamelCase : Dict = retriever.tokenizer
_UpperCamelCase : List[Any] = np.array([0, 3, 5] , dtype="long" )
_UpperCamelCase : Optional[int] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : str = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : Union[str, Any] = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
_UpperCamelCase : int = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
_UpperCamelCase : List[Any] = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
_UpperCamelCase : int = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
| 51
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Any , __a : Dict , __a : Optional[int]=3 , __a : Optional[Any]=32 , __a : int=3 , __a : Tuple=10 , __a : List[Any]=[10, 20, 30, 40] , __a : Tuple=[1, 1, 2, 1] , __a : Optional[int]=True , __a : List[str]=True , __a : List[Any]="relu" , __a : Union[str, Any]=3 , __a : List[Any]=None , ) -> Dict:
_UpperCamelCase : int = parent
_UpperCamelCase : int = batch_size
_UpperCamelCase : Union[str, Any] = image_size
_UpperCamelCase : Union[str, Any] = num_channels
_UpperCamelCase : List[Any] = embeddings_size
_UpperCamelCase : List[Any] = hidden_sizes
_UpperCamelCase : Optional[Any] = depths
_UpperCamelCase : Dict = is_training
_UpperCamelCase : Optional[int] = use_labels
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : str = num_labels
_UpperCamelCase : List[Any] = scope
_UpperCamelCase : Tuple = len(__a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
_UpperCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Tuple = None
if self.use_labels:
_UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
_UpperCamelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : List[Any] , __a : List[str] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = RegNetModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : str = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : Optional[int] , __a : Optional[Any] ) -> str:
_UpperCamelCase : Tuple = self.num_labels
_UpperCamelCase : Optional[int] = RegNetForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
_UpperCamelCase : Any = self.prepare_config_and_inputs()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : int = config_and_inputs
_UpperCamelCase : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ :Tuple = (
{"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Tuple = False
SCREAMING_SNAKE_CASE__ :Tuple = False
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :List[Any] = False
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
_UpperCamelCase : List[Any] = RegNetModelTester(self )
_UpperCamelCase : List[str] = ConfigTester(self , config_class=__a , has_text_modality=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
pass
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
_UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(__a )
_UpperCamelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Tuple = [*signature.parameters.keys()]
_UpperCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(config=__a )
for name, module in model.named_modules():
if isinstance(__a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
def check_hidden_states_output(__a : Union[str, Any] , __a : Any , __a : int ):
_UpperCamelCase : Tuple = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_UpperCamelCase : int = model(**self._prepare_for_class(__a , __a ) )
_UpperCamelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase : List[str] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
_UpperCamelCase, _UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : int = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCamelCase : Tuple = layer_type
_UpperCamelCase : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : Optional[int] = True
check_hidden_states_output(__a , __a , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : List[Any] = RegNetModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowercase__ ( ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
_UpperCamelCase : Union[str, Any] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__a )
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : Dict = prepare_img()
_UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : Dict = model(**__a )
# verify the logits
_UpperCamelCase : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
_UpperCamelCase : Union[str, Any] = torch.tensor([-0.41_80, -1.50_51, -3.48_36] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
| 51
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = LEDConfig
SCREAMING_SNAKE_CASE__ :str = {}
SCREAMING_SNAKE_CASE__ :List[str] = "gelu"
def __init__( self : List[Any] , __a : Union[str, Any] , __a : List[Any]=13 , __a : int=7 , __a : str=True , __a : Any=False , __a : str=99 , __a : str=32 , __a : Union[str, Any]=2 , __a : Optional[Any]=4 , __a : List[Any]=37 , __a : List[Any]=0.1 , __a : Tuple=0.1 , __a : Dict=20 , __a : str=2 , __a : Dict=1 , __a : Any=0 , __a : List[Any]=4 , ) -> List[Any]:
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : List[str] = batch_size
_UpperCamelCase : str = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : int = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : str = max_position_embeddings
_UpperCamelCase : int = eos_token_id
_UpperCamelCase : Dict = pad_token_id
_UpperCamelCase : Optional[Any] = bos_token_id
_UpperCamelCase : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_UpperCamelCase : List[str] = self.attention_window + 2
# Because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_UpperCamelCase : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCamelCase : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_UpperCamelCase : Dict = prepare_led_inputs_dict(__a , __a , __a )
_UpperCamelCase : Union[str, Any] = tf.concat(
[tf.zeros_like(__a )[:, :-1], tf.ones_like(__a )[:, -1:]] , axis=-1 , )
_UpperCamelCase : Union[str, Any] = global_attention_mask
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : int ) -> Tuple:
_UpperCamelCase : Tuple = TFLEDModel(config=__a ).get_decoder()
_UpperCamelCase : Tuple = inputs_dict["input_ids"]
_UpperCamelCase : int = input_ids[:1, :]
_UpperCamelCase : List[str] = inputs_dict["attention_mask"][:1, :]
_UpperCamelCase : List[Any] = 1
# first forward pass
_UpperCamelCase : Any = model(__a , attention_mask=__a , use_cache=__a )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = outputs.to_tuple()
# create hypothetical next tokens and extend next_input_ids with them
_UpperCamelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCamelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
_UpperCamelCase : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCamelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCamelCase : Tuple = model(__a , attention_mask=__a )[0]
_UpperCamelCase : int = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCamelCase : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=None ,lowercase_=None ,) -> Dict:
"""simple docstring"""
if attention_mask is None:
_UpperCamelCase : str = tf.cast(tf.math.not_equal(lowercase_ ,config.pad_token_id ) ,tf.inta )
if decoder_attention_mask is None:
_UpperCamelCase : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ),
] ,axis=-1 ,)
if head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Tuple = True
SCREAMING_SNAKE_CASE__ :str = False
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
_UpperCamelCase : int = TFLEDModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[int] = tf.zeros_like(inputs_dict["attention_mask"] )
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : str = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_UpperCamelCase : Dict = True
_UpperCamelCase : str = self.model_tester.seq_length
_UpperCamelCase : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__a : Optional[int] ):
_UpperCamelCase : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__a : Optional[Any] ):
_UpperCamelCase : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions]
_UpperCamelCase : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = True
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Any = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
_UpperCamelCase : Optional[Any] = model_class(__a )
_UpperCamelCase : List[Any] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCamelCase : int = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : str = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
_UpperCamelCase : Any = True
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
# TODO: Head-masking not yet implemented
pass
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
return tf.constant(lowercase_ ,dtype=tf.intaa )
lowerCamelCase__ = 1E-4
@slow
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_UpperCamelCase : int = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Optional[int] = model(**__a )[0]
_UpperCamelCase : Optional[int] = (1, 1024, 768)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Tuple = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_UpperCamelCase : Optional[int] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : List[str] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Union[str, Any] = model(**__a )[0]
_UpperCamelCase : int = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Optional[int] = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 , rtol=1e-3 )
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> list:
"""simple docstring"""
if bit_count < 0:
raise ValueError("The given input must be non-negative" )
# get the generated string sequence
_UpperCamelCase : str = gray_code_sequence_string(lowercase_ )
# convert them to integers
for i in range(len(lowercase_ ) ):
_UpperCamelCase : Union[str, Any] = int(sequence[i] ,2 )
return sequence
def lowercase__ ( lowercase_ ) -> list:
"""simple docstring"""
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
_UpperCamelCase : str = 1 << bit_count # defines the length of the sequence
# 1 << n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
_UpperCamelCase : Optional[int] = gray_code_sequence_string(bit_count - 1 )
_UpperCamelCase : str = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
_UpperCamelCase : Dict = "0" + smaller_sequence[i]
sequence.append(lowercase_ )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
_UpperCamelCase : Tuple = "1" + smaller_sequence[i]
sequence.append(lowercase_ )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
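# A hedged, de-obfuscated sketch of the same reflect-and-prefix construction used
# above; names here are illustrative and not part of the original file.
def gray_code_example(bit_count: int) -> list:
    if bit_count == 0:
        return [0]
    smaller = gray_code_example(bit_count - 1)
    # keep the (n-1)-bit codes, then append them reversed with the top bit set
    return smaller + [(1 << (bit_count - 1)) | code for code in reversed(smaller)]

assert gray_code_example(2) == [0, 1, 3, 2]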
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = RoCBertTokenizer
SCREAMING_SNAKE_CASE__ :Dict = None
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
SCREAMING_SNAKE_CASE__ :Union[str, Any] = filter_non_english
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
super().setUp()
_UpperCamelCase : Any = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
_UpperCamelCase : List[str] = {}
_UpperCamelCase : Tuple = {}
for i, value in enumerate(__a ):
_UpperCamelCase : List[str] = i
_UpperCamelCase : Optional[Any] = i
_UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
_UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(__a , __a , ensure_ascii=__a )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(__a , __a , ensure_ascii=__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : int = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(__a , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__a ) , [5, 6, 2, 5, 7, 8] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
_UpperCamelCase : Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
_UpperCamelCase : List[Any] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
_UpperCamelCase : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Dict = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : int = RoCBertBasicTokenizer(do_lower_case=__a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_UpperCamelCase : Any = {}
for i, token in enumerate(__a ):
_UpperCamelCase : str = i
_UpperCamelCase : Optional[int] = RoCBertWordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
_UpperCamelCase : Tuple = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(__a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Union[str, Any] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_UpperCamelCase : Optional[Any] = tokenizer_r.encode_plus(
__a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , )
_UpperCamelCase : List[Any] = tokenizer_r.do_lower_case if hasattr(__a , "do_lower_case" ) else False
_UpperCamelCase : Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = ["的", "人", "有"]
_UpperCamelCase : int = "".join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : int = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Any = False
_UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Any = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCamelCase : Any = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : Optional[int] = tokenizer.encode("你好" , add_special_tokens=__a )
_UpperCamelCase : Dict = tokenizer.encode("你是谁" , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__a )
_UpperCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Optional[Any] = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : int = "你好,你是谁"
_UpperCamelCase : Any = tokenizer.tokenize(__a )
_UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__a )
_UpperCamelCase : List[str] = tokenizer.convert_tokens_to_shape_ids(__a )
_UpperCamelCase : Any = tokenizer.convert_tokens_to_pronunciation_ids(__a )
_UpperCamelCase : Optional[int] = tokenizer.prepare_for_model(
__a , __a , __a , add_special_tokens=__a )
_UpperCamelCase : Tuple = tokenizer.encode_plus(__a , add_special_tokens=__a )
self.assertEqual(__a , __a )
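# Hedged usage sketch mirroring the assertions above: RoCBertTokenizer produces three
# parallel id sequences (token, shape, pronunciation). The checkpoint name below is an
# assumption for illustration and may differ in your environment.
from transformers import RoCBertTokenizer

roc_tokenizer = RoCBertTokenizer.from_pretrained("weiweishi/roc-bert-base-zh")  # assumed checkpoint
roc_tokens = roc_tokenizer.tokenize("你好,你是谁")
token_ids = roc_tokenizer.convert_tokens_to_ids(roc_tokens)
shape_ids = roc_tokenizer.convert_tokens_to_shape_ids(roc_tokens)
pronunciation_ids = roc_tokenizer.convert_tokens_to_pronunciation_ids(roc_tokens)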
"""simple docstring"""
from math import loga
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
if a < 0:
raise ValueError("Input value must be a positive integer" )
elif isinstance(lowercase_ ,float ):
raise TypeError("Input value must be a 'int' type" )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
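# A clean sketch of the trick above: n & -n isolates the lowest set bit, and its
# base-2 logarithm is that bit's index; bit_length() avoids floating-point log entirely.
def lowest_set_bit_index(number: int) -> int:
    if number < 0:
        raise ValueError("Input value must be a non-negative integer")
    return 0 if number == 0 else (number & -number).bit_length() - 1

assert lowest_set_bit_index(12) == 2  # 12 == 0b1100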
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = "yolos"
def __init__( self : Dict , __a : Optional[Any]=768 , __a : List[Any]=12 , __a : Any=12 , __a : List[Any]=3072 , __a : Optional[int]="gelu" , __a : Dict=0.0 , __a : Optional[Any]=0.0 , __a : Any=0.02 , __a : Optional[int]=1e-1_2 , __a : List[Any]=[512, 864] , __a : List[str]=16 , __a : str=3 , __a : Optional[Any]=True , __a : Optional[Any]=100 , __a : List[str]=True , __a : Any=False , __a : List[str]=1 , __a : str=5 , __a : Optional[Any]=2 , __a : Tuple=5 , __a : Any=2 , __a : Union[str, Any]=0.1 , **__a : List[str] , ) -> List[str]:
super().__init__(**__a )
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : Tuple = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : Any = qkv_bias
_UpperCamelCase : str = num_detection_tokens
_UpperCamelCase : str = use_mid_position_embeddings
_UpperCamelCase : List[str] = auxiliary_loss
# Hungarian matcher
_UpperCamelCase : List[Any] = class_cost
_UpperCamelCase : int = bbox_cost
_UpperCamelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCamelCase : List[Any] = bbox_loss_coefficient
_UpperCamelCase : str = giou_loss_coefficient
_UpperCamelCase : Dict = eos_coefficient
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> float:
return 1e-4
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return 12
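# Hedged usage sketch for the configuration defined above, shown with its documented
# defaults; the model is randomly initialised and serves illustration only.
from transformers import YolosConfig, YolosModel

yolos_config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
yolos_model = YolosModel(yolos_config)
print(yolos_config.hidden_size)  # 768 by default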
"""simple docstring"""
import string
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = ""
for i in sequence:
_UpperCamelCase : List[str] = ord(lowercase_ )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = string.ascii_letters
_UpperCamelCase : str = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowercase_ )] if c in letters else c for c in sequence )
def lowercase__ ( ) -> None:
"""simple docstring"""
from timeit import timeit
print("Running performance benchmarks..." )
_UpperCamelCase : List[Any] = "from string import printable ; from __main__ import atbash, atbash_slow"
print(F'''> atbash_slow(): {timeit('atbash_slow(printable)' ,setup=lowercase_ )} seconds''' )
print(F'''> atbash(): {timeit('atbash(printable)' ,setup=lowercase_ )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
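# An equivalent sketch using str.maketrans, which precomputes the whole substitution
# table once instead of indexing per character (same mapping as atbash above).
ATBASH_TABLE = str.maketrans(
    string.ascii_letters,
    string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1],
)

def atbash_maketrans(sequence: str) -> str:
    return sequence.translate(ATBASH_TABLE)

assert atbash_maketrans("ABCDEFGH") == "ZYXWVUTS"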
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCamelCase__ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCamelCase__ = [ord(letter) for letter in string.ascii_lowercase]
lowerCamelCase__ = {ord(char) for char in VALID_CHARS}
lowerCamelCase__ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowercase__ ( lowercase_ ,lowercase_ ) -> str | None:
"""simple docstring"""
_UpperCamelCase : str = ""
_UpperCamelCase : int
_UpperCamelCase : int
_UpperCamelCase : int
for keychar, cipherchar in zip(cycle(lowercase_ ) ,lowercase_ ):
_UpperCamelCase : Dict = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowercase_ )
return decoded
def lowercase__ ( lowercase_ ) -> list[str]:
"""simple docstring"""
_UpperCamelCase : list[str] = []
for key in product(lowercase_ ,repeat=3 ):
_UpperCamelCase : int = try_key(lowercase_ ,lowercase_ )
if encoded is not None:
possibles.append(lowercase_ )
return possibles
def lowercase__ ( lowercase_ ,lowercase_ ) -> list[str]:
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def lowercase__ ( lowercase_ = "p059_cipher.txt" ) -> int:
"""simple docstring"""
_UpperCamelCase : list[int]
_UpperCamelCase : list[str]
_UpperCamelCase : str
_UpperCamelCase : str
_UpperCamelCase : str = Path(lowercase_ ).parent.joinpath(lowercase_ ).read_text(encoding="utf-8" )
_UpperCamelCase : Optional[Any] = [int(lowercase_ ) for number in data.strip().split("," )]
_UpperCamelCase : List[str] = filter_valid_chars(lowercase_ )
for common_word in COMMON_WORDS:
_UpperCamelCase : Union[str, Any] = filter_common_word(lowercase_ ,lowercase_ )
if len(lowercase_ ) == 1:
break
_UpperCamelCase : Union[str, Any] = possibles[0]
return sum(ord(lowercase_ ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
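# A toy illustration of the XOR step behind try_key above: because XOR is its own
# inverse, applying the same cycled key twice round-trips the plaintext.
def xor_with_key(data: bytes, key: bytes) -> bytes:
    return bytes(byte ^ k for byte, k in zip(data, cycle(key)))

plain = b"the quick brown fox"
assert xor_with_key(xor_with_key(plain, b"god"), b"god") == plain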
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : str , __a : Union[str, Any]=13 , __a : str=[30, 30] , __a : int=2 , __a : List[Any]=3 , __a : Optional[int]=True , __a : List[str]=True , __a : Optional[int]=32 , __a : Optional[int]=5 , __a : List[str]=4 , __a : Tuple=37 , __a : Tuple="gelu" , __a : Optional[int]=0.1 , __a : Optional[int]=0.1 , __a : Dict=10 , __a : int=0.02 , __a : Dict=3 , __a : Any=None , __a : Any=8 , __a : Optional[Any]=10 , ) -> Dict:
_UpperCamelCase : Optional[int] = parent
_UpperCamelCase : List[Any] = batch_size
_UpperCamelCase : List[Any] = image_size
_UpperCamelCase : List[str] = patch_size
_UpperCamelCase : Optional[int] = num_channels
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : Tuple = use_labels
_UpperCamelCase : Tuple = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Optional[int] = num_attention_heads
_UpperCamelCase : int = intermediate_size
_UpperCamelCase : Optional[Any] = hidden_act
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : Optional[int] = attention_probs_dropout_prob
_UpperCamelCase : Union[str, Any] = type_sequence_label_size
_UpperCamelCase : List[Any] = initializer_range
_UpperCamelCase : str = num_labels
_UpperCamelCase : Optional[Any] = scope
_UpperCamelCase : Optional[int] = n_targets
_UpperCamelCase : Dict = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
_UpperCamelCase : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
_UpperCamelCase : Optional[int] = num_patches + 1 + self.num_detection_tokens
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
_UpperCamelCase : str = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
_UpperCamelCase : str = []
for i in range(self.batch_size ):
_UpperCamelCase : str = {}
_UpperCamelCase : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__a )
_UpperCamelCase : List[str] = torch.rand(self.n_targets , 4 , device=__a )
labels.append(__a )
_UpperCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __SCREAMING_SNAKE_CASE ( self : int , __a : Optional[int] , __a : int , __a : Dict ) -> Dict:
_UpperCamelCase : int = YolosModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Dict , __a : int ) -> Dict:
_UpperCamelCase : Union[str, Any] = YolosForObjectDetection(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[Any] = model(pixel_values=__a )
_UpperCamelCase : int = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
_UpperCamelCase : Dict = model(pixel_values=__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
_UpperCamelCase : int = self.prepare_config_and_inputs()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = config_and_inputs
_UpperCamelCase : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ :int = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
SCREAMING_SNAKE_CASE__ :Dict = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = False
SCREAMING_SNAKE_CASE__ :List[str] = False
SCREAMING_SNAKE_CASE__ :List[Any] = False
def __SCREAMING_SNAKE_CASE ( self : str , __a : Dict , __a : Union[str, Any] , __a : int=False ) -> Dict:
_UpperCamelCase : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
_UpperCamelCase : int = []
for i in range(self.model_tester.batch_size ):
_UpperCamelCase : Dict = {}
_UpperCamelCase : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=__a , dtype=torch.long )
_UpperCamelCase : List[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=__a , dtype=torch.float )
labels.append(__a )
_UpperCamelCase : Optional[Any] = labels
return inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
_UpperCamelCase : int = YolosModelTester(self )
_UpperCamelCase : Union[str, Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
# YOLOS does not use inputs_embeds
pass
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
_UpperCamelCase, _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = model_class(__a )
_UpperCamelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : str = [*signature.parameters.keys()]
_UpperCamelCase : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Any:
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[Any] = True
# in YOLOS, the seq_len is different
_UpperCamelCase : Any = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
_UpperCamelCase : Tuple = True
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : int = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_UpperCamelCase : str = model(**self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Union[str, Any] = outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : Any = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Optional[int] = outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_UpperCamelCase : List[str] = len(__a )
# Check attention is always last and order is fine
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : str = True
_UpperCamelCase : List[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_UpperCamelCase : str = model(**self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Union[str, Any] = 1
self.assertEqual(out_len + added_hidden_states , len(__a ) )
_UpperCamelCase : List[Any] = outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
def check_hidden_states_output(__a : List[Any] , __a : List[str] , __a : Union[str, Any] ):
_UpperCamelCase : Optional[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
_UpperCamelCase : str = outputs.hidden_states
_UpperCamelCase : Union[str, Any] = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
# YOLOS has a different seq_length
_UpperCamelCase : Tuple = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_UpperCamelCase, _UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : Optional[Any] = True
check_hidden_states_output(__a , __a , __a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Union[str, Any] = YolosModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowercase__ ( ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return AutoImageProcessor.from_pretrained("hustvl/yolos-small" ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
_UpperCamelCase : int = YolosForObjectDetection.from_pretrained("hustvl/yolos-small" ).to(__a )
_UpperCamelCase : int = self.default_image_processor
_UpperCamelCase : Tuple = prepare_img()
_UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(inputs.pixel_values )
# verify outputs
_UpperCamelCase : List[str] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , __a )
_UpperCamelCase : Optional[Any] = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] , device=__a , )
_UpperCamelCase : Optional[int] = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] , device=__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __a , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __a , atol=1e-4 ) )
# verify postprocessing
_UpperCamelCase : List[str] = image_processor.post_process_object_detection(
__a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
_UpperCamelCase : Any = torch.tensor([0.99_94, 0.97_90, 0.99_64, 0.99_72, 0.98_61] ).to(__a )
_UpperCamelCase : Union[str, Any] = [75, 75, 17, 63, 17]
_UpperCamelCase : Dict = torch.tensor([3_35.06_09, 79.38_48, 3_75.42_16, 1_87.24_95] ).to(__a )
self.assertEqual(len(results["scores"] ) , 5 )
self.assertTrue(torch.allclose(results["scores"] , __a , atol=1e-4 ) )
self.assertSequenceEqual(results["labels"].tolist() , __a )
self.assertTrue(torch.allclose(results["boxes"][0, :] , __a ) )
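# Hedged end-to-end sketch of the inference flow exercised by the test above; the
# checkpoint and threshold match the test, while the image path is a placeholder.
import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

detector_image = Image.open("path/to/image.png")  # placeholder path
processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
detector = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
with torch.no_grad():
    detector_outputs = detector(**processor(images=detector_image, return_tensors="pt"))
detections = processor.post_process_object_detection(
    detector_outputs, threshold=0.3, target_sizes=[detector_image.size[::-1]]
)[0]  # dict with "scores", "labels", "boxes"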
"""simple docstring"""
def lowercase__ ( lowercase_ ,lowercase_ ) -> None:
"""simple docstring"""
_UpperCamelCase : List[Any] = len(lowercase_ )
print("The following activities are selected:" )
# The first activity is always selected
_UpperCamelCase : List[Any] = 0
print(lowercase_ ,end="," )
# Consider rest of the activities
for j in range(lowercase_ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(lowercase_ ,end="," )
_UpperCamelCase : Optional[Any] = j
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = [1, 3, 0, 5, 8, 5]
lowerCamelCase__ = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
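# The greedy rule above assumes activities arrive sorted by finish time; a more
# general sketch sorts first and then applies the same selection rule.
def select_activities(start: list, finish: list) -> list:
    order = sorted(range(len(start)), key=lambda idx: finish[idx])
    selected = [order[0]]
    for idx in order[1:]:
        if start[idx] >= finish[selected[-1]]:
            selected.append(idx)
    return selected

assert select_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]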
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :torch.FloatTensor
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Dict=3 , __a : Any=3 , __a : Union[str, Any]=("DownEncoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Tuple=32 , __a : int="silu" , __a : str=True , ) -> Dict:
super().__init__()
_UpperCamelCase : List[str] = layers_per_block
_UpperCamelCase : Dict = torch.nn.Convad(
__a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : int = None
_UpperCamelCase : Any = nn.ModuleList([] )
# down
_UpperCamelCase : List[str] = block_out_channels[0]
for i, down_block_type in enumerate(__a ):
_UpperCamelCase : Tuple = output_channel
_UpperCamelCase : int = block_out_channels[i]
_UpperCamelCase : int = i == len(__a ) - 1
_UpperCamelCase : Dict = get_down_block(
__a , num_layers=self.layers_per_block , in_channels=__a , out_channels=__a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , )
self.down_blocks.append(__a )
# mid
_UpperCamelCase : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# out
_UpperCamelCase : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : Any = nn.SiLU()
_UpperCamelCase : Union[str, Any] = 2 * out_channels if double_z else out_channels
_UpperCamelCase : Tuple = nn.Convad(block_out_channels[-1] , __a , 3 , padding=1 )
_UpperCamelCase : Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Dict ) -> List[str]:
_UpperCamelCase : int = x
_UpperCamelCase : Optional[int] = self.conv_in(__a )
if self.training and self.gradient_checkpointing:
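# wrap each block so torch.utils.checkpoint recomputes it in backward, trading compute for activation memory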
def create_custom_forward(__a : Tuple ):
def custom_forward(*__a : Any ):
return module(*__a )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , use_reentrant=__a )
# middle
_UpperCamelCase : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , use_reentrant=__a )
else:
for down_block in self.down_blocks:
_UpperCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a )
# middle
_UpperCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __a )
else:
# down
for down_block in self.down_blocks:
_UpperCamelCase : int = down_block(__a )
# middle
_UpperCamelCase : int = self.mid_block(__a )
# post-process
_UpperCamelCase : Any = self.conv_norm_out(__a )
_UpperCamelCase : Any = self.conv_act(__a )
_UpperCamelCase : Optional[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : int=3 , __a : Any=3 , __a : str=("UpDecoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Optional[int]=32 , __a : Tuple="silu" , __a : Union[str, Any]="group" , ) -> str:
super().__init__()
_UpperCamelCase : List[Any] = layers_per_block
_UpperCamelCase : Tuple = nn.Convad(
__a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = nn.ModuleList([] )
_UpperCamelCase : List[Any] = in_channels if norm_type == "spatial" else None
# mid
_UpperCamelCase : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# up
_UpperCamelCase : List[str] = list(reversed(__a ) )
_UpperCamelCase : int = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__a ):
_UpperCamelCase : int = output_channel
_UpperCamelCase : Union[str, Any] = reversed_block_out_channels[i]
_UpperCamelCase : Optional[Any] = i == len(__a ) - 1
_UpperCamelCase : Union[str, Any] = get_up_block(
__a , num_layers=self.layers_per_block + 1 , in_channels=__a , out_channels=__a , prev_output_channel=__a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , resnet_time_scale_shift=__a , )
self.up_blocks.append(__a )
_UpperCamelCase : Optional[Any] = output_channel
# out
if norm_type == "spatial":
_UpperCamelCase : Optional[int] = SpatialNorm(block_out_channels[0] , __a )
else:
_UpperCamelCase : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : str = nn.SiLU()
_UpperCamelCase : str = nn.Convad(block_out_channels[0] , __a , 3 , padding=1 )
_UpperCamelCase : Dict = False
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[Any] , __a : Union[str, Any]=None ) -> Tuple:
_UpperCamelCase : List[str] = z
_UpperCamelCase : Dict = self.conv_in(__a )
_UpperCamelCase : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Any ):
def custom_forward(*__a : Tuple ):
return module(*__a )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a , use_reentrant=__a )
_UpperCamelCase : Optional[int] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , __a , use_reentrant=__a )
else:
# middle
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a )
_UpperCamelCase : Union[str, Any] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a , __a )
else:
# middle
_UpperCamelCase : str = self.mid_block(__a , __a )
_UpperCamelCase : int = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : Any = up_block(__a , __a )
# post-process
if latent_embeds is None:
_UpperCamelCase : List[str] = self.conv_norm_out(__a )
else:
_UpperCamelCase : Optional[int] = self.conv_norm_out(__a , __a )
_UpperCamelCase : Tuple = self.conv_act(__a )
_UpperCamelCase : List[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple , __a : List[str] , __a : List[str] , __a : str=None , __a : Optional[int]="random" , __a : Any=False , __a : Optional[Any]=True ) -> List[Any]:
super().__init__()
_UpperCamelCase : Tuple = n_e
_UpperCamelCase : Tuple = vq_embed_dim
_UpperCamelCase : Union[str, Any] = beta
_UpperCamelCase : str = legacy
_UpperCamelCase : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_UpperCamelCase : Any = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
_UpperCamelCase : Dict = self.used.shape[0]
_UpperCamelCase : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCamelCase : Optional[int] = self.re_embed
_UpperCamelCase : Any = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
_UpperCamelCase : Union[str, Any] = n_e
_UpperCamelCase : List[str] = sane_index_shape
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : str = inds.shape
assert len(__a ) > 1
_UpperCamelCase : Union[str, Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[Any] = self.used.to(__a )
_UpperCamelCase : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCamelCase : Optional[Any] = match.argmax(-1 )
_UpperCamelCase : Any = match.sum(2 ) < 1
if self.unknown_index == "random":
_UpperCamelCase : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_UpperCamelCase : Dict = self.unknown_index
return new.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[int] ) -> Optional[int]:
_UpperCamelCase : int = inds.shape
assert len(__a ) > 1
_UpperCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[int] = self.used.to(__a )
if self.re_embed > self.used.shape[0]: # extra token
_UpperCamelCase : int = 0 # simply set to zero
_UpperCamelCase : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __a )
return back.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : str ) -> Optional[int]:
# reshape z -> (batch, height, width, channel) and flatten
_UpperCamelCase : List[str] = z.permute(0 , 2 , 3 , 1 ).contiguous()
_UpperCamelCase : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_UpperCamelCase : Optional[int] = torch.argmin(torch.cdist(__a , self.embedding.weight ) , dim=1 )
_UpperCamelCase : int = self.embedding(__a ).view(z.shape )
_UpperCamelCase : str = None
_UpperCamelCase : Any = None
# compute loss for embedding
if not self.legacy:
_UpperCamelCase : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_UpperCamelCase : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_UpperCamelCase : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
_UpperCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_UpperCamelCase : Tuple = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_UpperCamelCase : Dict = self.remap_to_used(__a )
_UpperCamelCase : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_UpperCamelCase : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[str] , __a : str ) -> Any:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_UpperCamelCase : str = indices.reshape(shape[0] , -1 ) # add batch axis
_UpperCamelCase : str = self.unmap_to_all(__a )
_UpperCamelCase : int = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_UpperCamelCase : Optional[int] = self.embedding(__a )
if shape is not None:
_UpperCamelCase : Tuple = z_q.view(__a )
# reshape back to match original input shape
_UpperCamelCase : Tuple = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : List[str] , __a : Optional[Any]=False ) -> int:
_UpperCamelCase : Dict = parameters
_UpperCamelCase, _UpperCamelCase : str = torch.chunk(__a , 2 , dim=1 )
_UpperCamelCase : Tuple = torch.clamp(self.logvar , -30.0 , 20.0 )
_UpperCamelCase : Union[str, Any] = deterministic
_UpperCamelCase : Dict = torch.exp(0.5 * self.logvar )
_UpperCamelCase : Any = torch.exp(self.logvar )
if self.deterministic:
_UpperCamelCase : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
_UpperCamelCase : List[Any] = randn_tensor(
self.mean.shape , generator=__a , device=self.parameters.device , dtype=self.parameters.dtype )
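# reparameterization trick: mean + std * eps keeps the sample differentiable w.r.t. the parameters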
_UpperCamelCase : List[Any] = self.mean + self.std * sample
return x
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[str]=None ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Tuple , __a : List[str]=[1, 2, 3] ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
_UpperCamelCase : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
return self.mean
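# A compact sketch of the vector-quantisation step implemented above: nearest
# codebook lookup plus the straight-through estimator that copies gradients past
# the non-differentiable argmin (shapes simplified to (N, D) vs. a (K, D) codebook).
def quantize_example(z: torch.Tensor, codebook: torch.Tensor) -> torch.Tensor:
    indices = torch.cdist(z, codebook).argmin(dim=1)  # nearest embedding per row
    z_q = codebook[indices]
    return z + (z_q - z).detach()  # forward pass uses z_q, backward is identity on z

# quantize_example(torch.randn(8, 4), torch.randn(16, 4))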
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCamelCase__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 16_000 ) -> str:
"""simple docstring"""
_UpperCamelCase : int = int(round(sample_rate * max_length ) )
if len(lowercase_ ) <= sample_length:
return wav
_UpperCamelCase : Dict = randint(0 ,len(lowercase_ ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
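# e.g. with max_length=20.0 and sample_rate=16_000, any clip longer than
# 320_000 samples is randomly cropped to exactly 320_000 samples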
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[str] = field(default=_UpperCamelCase , metadata={"help": "Name of a dataset from the datasets package"} )
SCREAMING_SNAKE_CASE__ :Optional[str] = field(
default=_UpperCamelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
SCREAMING_SNAKE_CASE__ :Optional[str] = field(
default=_UpperCamelCase , metadata={"help": "A file containing the training audio paths and labels."} )
SCREAMING_SNAKE_CASE__ :Optional[str] = field(
default=_UpperCamelCase , metadata={"help": "A file containing the validation audio paths and labels."} )
SCREAMING_SNAKE_CASE__ :str = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
SCREAMING_SNAKE_CASE__ :str = field(
default="validation" , metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
SCREAMING_SNAKE_CASE__ :str = field(
default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , )
SCREAMING_SNAKE_CASE__ :str = field(
default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
SCREAMING_SNAKE_CASE__ :Optional[int] = field(
default=_UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE__ :Optional[int] = field(
default=_UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE__ :float = field(
default=20 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path: str = field(
        default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    feature_extractor_name: Optional[str] = field(
        default=None , metadata={"help": "Name or path of preprocessor config."} )
    freeze_feature_encoder: bool = field(
        default=True , metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
    attention_mask: bool = field(
        default=True , metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    freeze_feature_extractor: Optional[bool] = field(
        default=None , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
    def __post_init__( self ):
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`." , FutureWarning , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`." )
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification" ,model_args ,data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name ,data_args.dataset_config_name ,split=data_args.train_split_name ,use_auth_token=True if model_args.use_auth_token else None ,)
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name ,data_args.dataset_config_name ,split=data_args.eval_split_name ,use_auth_token=True if model_args.use_auth_token else None ,)
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'''{', '.join(raw_datasets['train'].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"Make sure to set `--label_column_name` to the correct text column - one of "
F'''{', '.join(raw_datasets['train'].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path ,return_attention_mask=model_args.attention_mask ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name ,datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch ):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"] ,max_length=data_args.max_length_seconds ,sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        inputs = feature_extractor(subsampled_wavs ,sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["labels"] = list(batch[data_args.label_column_name] )
        return output_batch
    def val_transforms(batch ):
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs ,sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["labels"] = list(batch[data_args.label_column_name] )
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_UpperCamelCase : Union[str, Any] = raw_datasets["train"].features[data_args.label_column_name].names
_UpperCamelCase, _UpperCamelCase : int = {}, {}
for i, label in enumerate(lowercase_ ):
_UpperCamelCase : int = str(lowercase_ )
_UpperCamelCase : str = label
# Load the accuracy metric from the datasets package
_UpperCamelCase : List[Any] = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred ):
        predictions = np.argmax(eval_pred.predictions ,axis=1 )
        return metric.compute(predictions=predictions ,references=eval_pred.label_ids )
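    # e.g. eval_pred.predictions of shape (num_examples, num_labels) is argmax-reduced to
    # class ids and scored against the integer eval_pred.label_ids, yielding {"accuracy": ...}.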
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path ,num_labels=len(labels ) ,label2id=label2id ,id2label=id2label ,finetuning_task="audio-classification" ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path ,from_tf=bool(".ckpt" in model_args.model_name_or_path ) ,config=config ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms ,output_all_columns=False )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms ,output_all_columns=False )
# Initialize our trainer
    trainer = Trainer(
        model=model ,args=training_args ,train_dataset=raw_datasets["train"] if training_args.do_train else None ,eval_dataset=raw_datasets["eval"] if training_args.do_eval else None ,compute_metrics=compute_metrics ,tokenizer=feature_extractor ,)
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("train" ,train_result.metrics )
trainer.save_metrics("train" ,train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval" ,metrics )
        trainer.save_metrics("eval" ,metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 51
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class Summarization( TaskTemplate ):
    '''simple docstring'''
    task: str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"text": Value("string" )} )
    label_schema: ClassVar[Features] = Features({"summary": Value("string" )} )
    text_column: str = "text"
    summary_column: str = "summary"
    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 51
| 1
|
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D( nn.Module ):
    '''simple docstring'''
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup( self ) -> None:
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , hidden_states ):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
        hidden_states = self.conv(hidden_states )
        return hidden_states
class FlaxDownsample2D( nn.Module ):
    '''simple docstring'''
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup( self ) -> None:
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , hidden_states ):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states )
        return hidden_states
class FlaxResnetBlock2D( nn.Module ):
    '''simple docstring'''
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32
    def setup( self ) -> None:
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.conv1 = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        self.time_emb_proj = nn.Dense(out_channels , dtype=self.dtype )
        self.norm2 = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.dropout = nn.Dropout(self.dropout_prob )
        self.conv2 = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
    def __call__( self , hidden_states , temb , deterministic=True ):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.conv1(hidden_states )
        temb = self.time_emb_proj(nn.swish(temb ) )
        temb = jnp.expand_dims(jnp.expand_dims(temb , 1 ) , 1 )
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.dropout(hidden_states , deterministic )
        hidden_states = self.conv2(hidden_states )
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual )
        return hidden_states + residual
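# Design note: FlaxUpsample2D doubles the spatial resolution (nearest-neighbor resize followed
# by a stride-1 3x3 conv), FlaxDownsample2D halves it (stride-2 3x3 conv), and FlaxResnetBlock2D
# is a pre-activation residual block that injects the projected timestep embedding between its
# two convolutions, with an optional 1x1 shortcut conv to align channel counts.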
| 51
|
"""simple docstring"""
def matching_min_vertex_cover( graph ) -> set:
    """simple docstring"""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph )
    # While there are still edges left, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and then
    # remove all edges adjacent to from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
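# Because both endpoints of every matched edge enter the cover, the result is a vertex cover
# of size at most twice the minimum (the classic matching-based 2-approximation).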
def get_edges( graph ) -> set:
    """simple docstring"""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 51
| 1
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig( datasets.BuilderConfig ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = ","
SCREAMING_SNAKE_CASE__ :Optional[str] = None
SCREAMING_SNAKE_CASE__ :Optional[Union[int, List[int], str]] = "infer"
SCREAMING_SNAKE_CASE__ :Optional[List[str]] = None
SCREAMING_SNAKE_CASE__ :Optional[List[str]] = None
SCREAMING_SNAKE_CASE__ :Optional[Union[int, str, List[int], List[str]]] = None
SCREAMING_SNAKE_CASE__ :Optional[Union[List[int], List[str]]] = None
SCREAMING_SNAKE_CASE__ :Optional[str] = None
SCREAMING_SNAKE_CASE__ :bool = True
SCREAMING_SNAKE_CASE__ :Optional[Literal["c", "python", "pyarrow"]] = None
SCREAMING_SNAKE_CASE__ :Dict[Union[int, str], Callable[[Any], Any]] = None
SCREAMING_SNAKE_CASE__ :Optional[list] = None
SCREAMING_SNAKE_CASE__ :Optional[list] = None
SCREAMING_SNAKE_CASE__ :bool = False
SCREAMING_SNAKE_CASE__ :Optional[Union[int, List[int]]] = None
SCREAMING_SNAKE_CASE__ :Optional[int] = None
SCREAMING_SNAKE_CASE__ :Optional[Union[str, List[str]]] = None
SCREAMING_SNAKE_CASE__ :bool = True
SCREAMING_SNAKE_CASE__ :bool = True
SCREAMING_SNAKE_CASE__ :bool = False
SCREAMING_SNAKE_CASE__ :bool = True
SCREAMING_SNAKE_CASE__ :Optional[str] = None
SCREAMING_SNAKE_CASE__ :str = "."
SCREAMING_SNAKE_CASE__ :Optional[str] = None
SCREAMING_SNAKE_CASE__ :str = '"'
SCREAMING_SNAKE_CASE__ :int = 0
SCREAMING_SNAKE_CASE__ :Optional[str] = None
SCREAMING_SNAKE_CASE__ :Optional[str] = None
SCREAMING_SNAKE_CASE__ :Optional[str] = None
SCREAMING_SNAKE_CASE__ :Optional[str] = None
SCREAMING_SNAKE_CASE__ :bool = True
SCREAMING_SNAKE_CASE__ :bool = True
SCREAMING_SNAKE_CASE__ :int = 0
SCREAMING_SNAKE_CASE__ :bool = True
SCREAMING_SNAKE_CASE__ :bool = False
SCREAMING_SNAKE_CASE__ :Optional[str] = None
SCREAMING_SNAKE_CASE__ :int = 10_000
SCREAMING_SNAKE_CASE__ :Optional[datasets.Features] = None
SCREAMING_SNAKE_CASE__ :Optional[str] = "strict"
SCREAMING_SNAKE_CASE__ :Literal["error", "warn", "skip"] = "error"
SCREAMING_SNAKE_CASE__ :Optional[str] = None
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
if self.delimiter is not None:
_UpperCamelCase : str = self.delimiter
if self.column_names is not None:
_UpperCamelCase : List[str] = self.column_names
@property
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
_UpperCamelCase : Tuple = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv( datasets.ArrowBasedBuilder ):
    '''simple docstring'''
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info( self ):
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table( self , pa_table: pa.Table ) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
        return pa_table
    def _generate_tables( self , files ):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                raise
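# A minimal usage sketch (file names are illustrative): load_dataset("csv",
# data_files={"train": "train.csv"}, sep=";") routes the extra keyword arguments through
# CsvConfig into pandas.read_csv via the pd_read_csv_kwargs property above.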
| 51
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["OwlViTFeatureExtractor"]
lowerCamelCase__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
| 1
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ] )
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__a , expected_max_diff=5e-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Any:
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
pass
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
return super().test_progress_bar()
@slow
@skip_mps
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def test_two_step_model( self ):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.float16 )
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        video = torch.randn((1, 10, 3, 1024, 576) , generator=generator )
        video = video.to("cuda" )
        prompt = "Spiderman is surfing"
        video_frames = pipe(prompt , video=video , generator=generator , num_inference_steps=3 , output_type="pt" ).frames
        expected_array = np.array([-1.0_45_89_84, -1.1_27_92_97, -0.9_66_30_86, -0.91_50_39_06, -0.75_09_76_56] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 51
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution( t_limit = 1_000_000 ,n_limit = 10 ) -> int:
    """simple docstring"""
    count: defaultdict = defaultdict(int )
    for outer_width in range(3 ,(t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound ,outer_width - 1 ,2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
| 1
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key( ciphertext ,key ) -> str | None:
    """simple docstring"""
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key ) ,ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
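# XOR is its own inverse, so applying the cycled three-letter key again decrypts the text;
# any byte that decodes outside the printable VALID_INTS set rejects the candidate key early.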
def filter_valid_chars( ciphertext ) -> list[str]:
    """simple docstring"""
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS ,repeat=3 ):
        encoded = try_key(ciphertext ,key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word( possibles ,common_word ) -> list[str]:
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]
def lowercase__ ( lowercase_ = "p059_cipher.txt" ) -> int:
"""simple docstring"""
_UpperCamelCase : list[int]
_UpperCamelCase : list[str]
_UpperCamelCase : str
_UpperCamelCase : str
_UpperCamelCase : str = Path(lowercase_ ).parent.joinpath(lowercase_ ).read_text(encoding="utf-8" )
_UpperCamelCase : Optional[Any] = [int(lowercase_ ) for number in data.strip().split("," )]
_UpperCamelCase : List[str] = filter_valid_chars(lowercase_ )
for common_word in COMMON_WORDS:
_UpperCamelCase : Union[str, Any] = filter_common_word(lowercase_ ,lowercase_ )
if len(lowercase_ ) == 1:
break
_UpperCamelCase : Union[str, Any] = possibles[0]
return sum(ord(lowercase_ ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("KEY")
lowerCamelCase__ = TypeVar("VAL")
@dataclass(frozen=True , slots=True )
class _Item( Generic[KEY, VAL] ):
    '''simple docstring'''
    key: KEY
    val: VAL
class _DeletedItem( _Item ):
    '''simple docstring'''
    def __init__( self ) -> None:
        super().__init__(None , None )
    def __bool__( self ) -> bool:
        return False
_deleted = _DeletedItem()
class HashMap( MutableMapping[KEY, VAL] ):
    '''simple docstring'''
    def __init__( self , initial_block_size: int = 8 , capacity_factor: float = 0.75 ) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index( self , key: KEY ) -> int:
        return hash(key ) % len(self._buckets )
    def _get_next_ind( self , ind: int ) -> int:
        return (ind + 1) % len(self._buckets )
    def _try_set( self , ind: int , key: KEY , val: VAL ) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False
    def _is_full( self ) -> bool:
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )
    def _is_sparse( self ) -> bool:
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit
    def _resize( self , new_size: int ) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )
    def _size_up( self ) -> None:
        self._resize(len(self._buckets ) * 2 )
    def _size_down( self ) -> None:
        self._resize(len(self._buckets ) // 2 )
    def _iterate_buckets( self , key: KEY ) -> Iterator[int]:
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )
    def _add_item( self , key: KEY , val: VAL ) -> None:
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break
    def __setitem__( self , key: KEY , val: VAL ) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key , val )
    def __delitem__( self , key: KEY ) -> None:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__( self , key: KEY ) -> VAL:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )
    def __len__( self ) -> int:
        return self._len
    def __iter__( self ) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)
    def __repr__( self ) -> str:
        val_string = " ,".join(
            F'''{item.key}: {item.val}''' for item in self._buckets if item )
        return F'''HashMap({val_string})'''
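# A minimal usage sketch:
# hm = HashMap()
# hm["key"] = 42
# assert hm["key"] == 42 and len(hm) == 1
# del hm["key"]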
| 51
| 1
|
"""simple docstring"""
from math import factorial
def solution( num = 100 ) -> int:
    """simple docstring"""
    return sum(int(x ) for x in str(factorial(num ) ) )
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 51
|
"""simple docstring"""
class PrefixSum:
    '''simple docstring'''
    def __init__( self , array: list[int] ) -> None:
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self , start: int , end: int ) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum( self , target_sum: int ) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
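# contains_sum relies on the identity sum(array[i:j]) == prefix_sum[j - 1] - prefix_sum[i - 1]:
# a subarray summing to target_sum exists exactly when some running prefix minus target_sum
# was already seen (the {0} seed covers prefixes starting at index 0).
# E.g. PrefixSum([1, 2, 3]).get_sum(1, 2) == 5.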
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        input_text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 51
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs( token ,num_runs=7 ):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
    result = requests.get(url ,headers=headers ).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs( token ):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token )
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts( artifact_names ,output_dir ,token ):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id ,token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name ,artifact_url=artifact_url ,output_dir=output_dir ,token=token )
def get_last_daily_ci_reports( artifact_names ,output_dir ,token ):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names ,output_dir ,token )
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir ,F'''{artifact_name}.zip''' )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8" )
    return results
| 51
| 1
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
lowerCamelCase__ = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "dpt"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , image_size=384 , patch_size=16 , num_channels=3 , is_hybrid=False , qkv_bias=True , backbone_out_indices=[2, 5, 8, 11] , readout_type="project" , reassemble_factors=[4, 2, 1, 0.5] , neck_hidden_sizes=[96, 192, 384, 768] , fusion_hidden_size=256 , head_in_index=-1 , use_batch_norm_in_fusion_residual=False , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , semantic_loss_ignore_index=255 , semantic_classifier_dropout=0.1 , backbone_featmap_shape=[1, 1024, 24, 24] , neck_ignore_stages=[0, 1] , backbone_config=None , **kwargs , ):
        super().__init__(**kwargs )
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : List[str] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("Initializing the config with a `BiT` backbone." )
_UpperCamelCase : int = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
}
_UpperCamelCase : Union[str, Any] = BitConfig(**__a )
elif isinstance(__a , __a ):
logger.info("Initializing the config with a `BiT` backbone." )
_UpperCamelCase : int = BitConfig(**__a )
elif isinstance(__a , __a ):
_UpperCamelCase : List[str] = backbone_config
else:
raise ValueError(
F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
_UpperCamelCase : str = backbone_featmap_shape
_UpperCamelCase : Tuple = neck_ignore_stages
if readout_type != "project":
raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." )
else:
_UpperCamelCase : Dict = None
_UpperCamelCase : Tuple = None
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : Optional[int] = num_attention_heads
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : Optional[Any] = hidden_act
_UpperCamelCase : Union[str, Any] = hidden_dropout_prob
_UpperCamelCase : Optional[int] = attention_probs_dropout_prob
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : Optional[Any] = image_size
_UpperCamelCase : Optional[Any] = patch_size
_UpperCamelCase : Any = num_channels
_UpperCamelCase : List[Any] = qkv_bias
_UpperCamelCase : str = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']" )
_UpperCamelCase : Tuple = readout_type
_UpperCamelCase : Optional[Any] = reassemble_factors
_UpperCamelCase : Union[str, Any] = neck_hidden_sizes
_UpperCamelCase : int = fusion_hidden_size
_UpperCamelCase : List[str] = head_in_index
_UpperCamelCase : Tuple = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
_UpperCamelCase : Union[str, Any] = use_auxiliary_head
_UpperCamelCase : List[Any] = auxiliary_loss_weight
_UpperCamelCase : int = semantic_loss_ignore_index
_UpperCamelCase : Dict = semantic_classifier_dropout
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 51
|
"""simple docstring"""
import math
class SelfOrganizingMap:
    '''simple docstring'''
    def get_winner( self , weights: list[list[float]] , sample: list[int] ) -> int:
        da = 0.0
        db = 0.0
        for i in range(len(sample ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if da > db else 1
    def update( self , weights: list[list[int | float]] , sample: list[int] , j: int , alpha: float ) -> list[list[int | float]]:
        for i in range(len(sample ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
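# Competitive-learning update rule: get_winner picks the unit with the smaller squared
# Euclidean distance to the sample, and update moves only that winning unit j by a fraction
# `alpha` of its distance to the sample (w_j <- w_j + alpha * (x - w_j)).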
def main() -> None:
    """simple docstring"""
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights ,sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights ,sample ,winner ,alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights ,sample )
    # results
    print(F'''Clusters that the test sample belongs to : {winner}''' )
    print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
| 51
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"configuration_mobilenet_v2": [
"MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileNetV2Config",
"MobileNetV2OnnxConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["MobileNetV2FeatureExtractor"]
lowerCamelCase__ = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileNetV2ForImageClassification",
"MobileNetV2ForSemanticSegmentation",
"MobileNetV2Model",
"MobileNetV2PreTrainedModel",
"load_tf_weights_in_mobilenet_v2",
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase__ = "src/transformers"
lowerCamelCase__ = "docs/source/en"
lowerCamelCase__ = "."
def _find_text_in_file( filename ,start_prompt ,end_prompt ):
    """simple docstring"""
    with open(filename ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
lowerCamelCase__ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase__ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
lowerCamelCase__ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split( identifier ):
    """simple docstring"""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" ,identifier )
    return [m.group(0 ) for m in matches]
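# e.g. camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]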
def _center_text( text ,width ):
    """simple docstring"""
    text_length = 2 if text == "✅" or text == "❌" else len(text )
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules() -> str:
    """simple docstring"""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config" ,"" ) for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool )
    fast_tokenizers = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    pt_models = collections.defaultdict(bool )
    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if attr_name.endswith("Tokenizer" ):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast" ):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name )[:-1] )
    # Let's build that table!
    model_names = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c ) + 2 for c in columns]
    widths[0] = max([len(name ) for name in model_names] ) + 2
    # Build the table per se
    table = "|" + "|".join([_center_text(c ,w ) for c, w in zip(columns ,widths )] ) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l ,w ) for l, w in zip(line ,widths )] ) + "|\n"
    return table
def check_model_table(overwrite=False) -> None:
    """simple docstring"""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.")
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCamelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
"""simple docstring"""
from __future__ import annotations
def all_unique(input_list: list) -> bool:
    """simple docstring"""
    return len(set(input_list)) == len(input_list)
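# Example values (illustrative only):
# all_unique([1, 2, 3]) -> True
# all_unique([1, 2, 2]) -> False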
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """simple docstring"""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """simple docstring"""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
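# Worked example (parents and slice point assumed for illustration): with
# parent_1 = "abcd", parent_2 = "wxyz" and random_slice = 2, the children are
# ("abyz", "wxcd") - each child takes a prefix of one parent and the suffix of the other.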
def mutate(child: str, genes: list[str]) -> str:
    """simple docstring"""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """simple docstring"""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """simple docstring"""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.

        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}")

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
lowerCamelCase__ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" ,["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" ,["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" ,[None, "v2"] )
def test_hf_hub_url(repo_id, path, revision):
    """simple docstring"""
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name) -> str:
    """simple docstring"""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
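# Illustration (key name assumed, not taken from a real checkpoint):
# rename_keys("transformer.layers.0.linear1.weight") -> "model.decoder.layers.0.fc1.weight"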
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """simple docstring"""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint) -> MusicgenDecoderConfig:
    """simple docstring"""
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
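# For instance, decoder_config_from_checkpoint("small") yields a config with
# hidden_size=1024, ffn_dim=4096, num_hidden_layers=24 and num_attention_heads=16.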
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """simple docstring"""
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """simple docstring"""
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)
def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """simple docstring"""
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """simple docstring"""
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
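# Minimal usage sketch (adjacency matrix assumed for illustration):
# graph = [
#     [0, 1, 0, 1, 0],
#     [1, 0, 1, 1, 1],
#     [0, 1, 0, 0, 1],
#     [1, 1, 0, 0, 1],
#     [0, 1, 1, 1, 0],
# ]
# hamilton_cycle(graph) -> [0, 1, 2, 4, 3, 0]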
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
lowerCamelCase__ = input("Enter image url: ").strip()
print(f"""Downloading image from {url} ...""")
lowerCamelCase__ = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase__ = soup.find("meta", {"property": "og:image"})["content"]
lowerCamelCase__ = requests.get(image_url).content
lowerCamelCase__ = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
"""simple docstring"""
import math
class SelfOrganizingMap:
    '''simple docstring'''

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        # Compute the winning vector by squared Euclidean distance to each weight vector.
        d_0 = 0.0
        d_1 = 0.0
        for i in range(len(sample)):
            d_0 += math.pow((sample[i] - weights[0][i]), 2)
            d_1 += math.pow((sample[i] - weights[1][i]), 2)
            return 0 if d_0 > d_1 else 1
        return 0

    def update(self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float) -> list[list[int | float]]:
        # Move the winning vector j towards the sample by learning rate alpha.
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    """simple docstring"""
    # Training examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file) -> str:
    """simple docstring"""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
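# Example (path assumed for illustration, POSIX separator):
# get_module_path("tests/models/bert/test_modeling_bert.py") -> "tests.models.bert.test_modeling_bert"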
def get_test_module(test_file):
    """simple docstring"""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    """simple docstring"""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    """simple docstring"""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    """simple docstring"""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """simple docstring"""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    """simple docstring"""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    """simple docstring"""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    """simple docstring"""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """simple docstring"""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """simple docstring"""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    """simple docstring"""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
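# Example (illustrative values): to_json([int, "x", {str: bool}]) -> ["int", "x", {"str": "bool"}]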
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
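# Typical invocation handled by this parser (illustrative):
#   accelerate test --config_file path/to/default_config.yaml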
def test_command(args):
    """simple docstring"""
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    """simple docstring"""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """simple docstring"""
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
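# Example (list assumed for illustration): peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) -> 5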
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
lowerCamelCase__ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # A CamemBERT sequence has the format `<s> X </s>` (single) or `<s> A </s></s> B </s>` (pair).
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    '''simple docstring'''

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
# Realm tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
_UpperCamelCase : Optional[Any] = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
    def get_dummy_block_records(self):
        block_records = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
            ], dtype=object)
return block_records
    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(), tokenizer=self.get_tokenizer())
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np")
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np")
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME)
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
            self.assertEqual(retriever.block_records[0], b"This is the first record")
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    """simple docstring"""
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    '''simple docstring'''

    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    """simple docstring"""
    try:
        int(string)
        return True
    except ValueError:
        return False
def can_convert_to_float(string):
    """simple docstring"""
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    '''simple docstring'''

    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}")
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    """simple docstring"""
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    '''simple docstring'''

    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
    def test_saved_model_creation(self):
        pass
    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implement
        pass
def _long_tensor(tok_lst):
    """simple docstring"""
    return tf.constant(tok_lst, dtype=tf.int32)
TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , *__a : Any , **__a : Any ) -> List[Any]:
super().__init__(*__a , **__a )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[Any]=None ) -> List[str]:
_UpperCamelCase : Any = {}
if top_k is not None:
_UpperCamelCase : Optional[Any] = top_k
return {}, {}, postprocess_params
def __call__( self : List[Any] , __a : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__a : str ) -> Any:
return super().__call__(__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Dict ) -> List[Any]:
_UpperCamelCase : Tuple = load_image(__a )
_UpperCamelCase : Any = self.image_processor(images=__a , return_tensors=self.framework )
return model_inputs
def __SCREAMING_SNAKE_CASE ( self : str , __a : int ) -> List[Any]:
_UpperCamelCase : Tuple = self.model(**__a )
return model_outputs
    def __SCREAMING_SNAKE_CASE ( self : Optional[int] , model_outputs : str , top_k : Optional[Any]=5 ) -> Optional[Any]:
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores, ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
            topk = tf.math.top_k(probs , k=top_k )
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 51
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = RoCBertTokenizer
SCREAMING_SNAKE_CASE__ :Dict = None
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
SCREAMING_SNAKE_CASE__ :Union[str, Any] = filter_non_english
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
super().setUp()
_UpperCamelCase : Any = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
_UpperCamelCase : List[str] = {}
_UpperCamelCase : Tuple = {}
for i, value in enumerate(__a ):
_UpperCamelCase : List[str] = i
_UpperCamelCase : Optional[Any] = i
_UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
_UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(__a , __a , ensure_ascii=__a )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(__a , __a , ensure_ascii=__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : int = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(__a , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__a ) , [5, 6, 2, 5, 7, 8] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
_UpperCamelCase : Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
_UpperCamelCase : List[Any] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
_UpperCamelCase : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Dict = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : int = RoCBertBasicTokenizer(do_lower_case=__a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_UpperCamelCase : Any = {}
for i, token in enumerate(__a ):
_UpperCamelCase : str = i
_UpperCamelCase : Optional[int] = RoCBertWordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Union[str, Any] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_UpperCamelCase : Optional[Any] = tokenizer_r.encode_plus(
__a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , )
_UpperCamelCase : List[Any] = tokenizer_r.do_lower_case if hasattr(__a , "do_lower_case" ) else False
_UpperCamelCase : Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = ["的", "人", "有"]
_UpperCamelCase : int = "".join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : int = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Any = False
_UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Any = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCamelCase : Any = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : Optional[int] = tokenizer.encode("你好" , add_special_tokens=__a )
_UpperCamelCase : Dict = tokenizer.encode("你是谁" , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__a )
_UpperCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Optional[Any] = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : int = "你好,你是谁"
_UpperCamelCase : Any = tokenizer.tokenize(__a )
_UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__a )
_UpperCamelCase : List[str] = tokenizer.convert_tokens_to_shape_ids(__a )
_UpperCamelCase : Any = tokenizer.convert_tokens_to_pronunciation_ids(__a )
_UpperCamelCase : Optional[int] = tokenizer.prepare_for_model(
__a , __a , __a , add_special_tokens=__a )
_UpperCamelCase : Tuple = tokenizer.encode_plus(__a , add_special_tokens=__a )
self.assertEqual(__a , __a )
| 51
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : Optional[int] , __a : List[str]=13 , __a : Optional[Any]=3 , __a : Optional[Any]=224 , __a : str=30 , __a : Optional[Any]=400 , __a : List[Any]=True , __a : Any=None , __a : Union[str, Any]=True , __a : Dict=[0.5, 0.5, 0.5] , __a : str=[0.5, 0.5, 0.5] , ) -> Tuple:
_UpperCamelCase : Any = size if size is not None else {"height": 18, "width": 18}
_UpperCamelCase : Dict = parent
_UpperCamelCase : Optional[Any] = batch_size
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : Union[str, Any] = image_size
_UpperCamelCase : Optional[int] = min_resolution
_UpperCamelCase : int = max_resolution
_UpperCamelCase : str = do_resize
_UpperCamelCase : Tuple = size
_UpperCamelCase : int = do_normalize
_UpperCamelCase : int = image_mean
_UpperCamelCase : Tuple = image_std
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = ViTImageProcessor if is_vision_available() else None
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
_UpperCamelCase : str = EfficientFormerImageProcessorTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
return self.image_proc_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
_UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , "image_mean" ) )
self.assertTrue(hasattr(__a , "image_std" ) )
self.assertTrue(hasattr(__a , "do_normalize" ) )
self.assertTrue(hasattr(__a , "do_resize" ) )
self.assertTrue(hasattr(__a , "size" ) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
pass
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
# Initialize image_processor
_UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
_UpperCamelCase : Union[str, Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
_UpperCamelCase : Any = image_processor(__a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
# Initialize image_processor
_UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
_UpperCamelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
_UpperCamelCase : Optional[Any] = image_processor(__a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
# Initialize image_processor
_UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase : Optional[int] = prepare_image_inputs(self.image_proc_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
_UpperCamelCase : List[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
_UpperCamelCase : Tuple = image_processor(__a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 51
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = "yolos"
def __init__( self : Dict , __a : Optional[Any]=768 , __a : List[Any]=12 , __a : Any=12 , __a : List[Any]=3072 , __a : Optional[int]="gelu" , __a : Dict=0.0 , __a : Optional[Any]=0.0 , __a : Any=0.02 , __a : Optional[int]=1e-1_2 , __a : List[Any]=[512, 864] , __a : List[str]=16 , __a : str=3 , __a : Optional[Any]=True , __a : Optional[Any]=100 , __a : List[str]=True , __a : Any=False , __a : List[str]=1 , __a : str=5 , __a : Optional[Any]=2 , __a : Tuple=5 , __a : Any=2 , __a : Union[str, Any]=0.1 , **__a : List[str] , ) -> List[str]:
super().__init__(**__a )
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : Tuple = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : Any = qkv_bias
_UpperCamelCase : str = num_detection_tokens
_UpperCamelCase : str = use_mid_position_embeddings
_UpperCamelCase : List[str] = auxiliary_loss
# Hungarian matcher
_UpperCamelCase : List[Any] = class_cost
_UpperCamelCase : int = bbox_cost
_UpperCamelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCamelCase : List[Any] = bbox_loss_coefficient
_UpperCamelCase : str = giou_loss_coefficient
_UpperCamelCase : Dict = eos_coefficient
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> float:
return 1e-4
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return 12
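# --- Usage sketch (hedged): assuming this config mirrors transformers'
# `YolosConfig`, it can be instantiated with the defaults above overridden,
# e.g. a different input resolution and detection-token budget:
#
#     from transformers import YolosConfig
#     config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
#     config.model_type  # "yolos"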
| 51
| 1
|
"""simple docstring"""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowerCamelCase__ = "sshleifer/mar_enro_6_3_student"
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
super().setUp()
_UpperCamelCase : int = cached_path(
"https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz" , extract_compressed_file=__a , )
_UpperCamelCase : List[str] = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
MarianMTModel.from_pretrained(__a )
@slow
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase : Dict = {
"$MAX_LEN": 64,
"$BS": 64,
"$GAS": 1,
"$ENRO_DIR": self.data_dir,
"facebook/mbart-large-cc25": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"--learning_rate=3e-5": "--learning_rate 3e-4",
"--num_train_epochs 6": "--num_train_epochs 1",
}
# Clean up bash script
_UpperCamelCase : Tuple = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py" )[1].strip()
_UpperCamelCase : Any = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
for k, v in env_vars_to_replace.items():
_UpperCamelCase : Dict = bash_script.replace(__a , str(__a ) )
_UpperCamelCase : int = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_UpperCamelCase : Dict = F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_UpperCamelCase : List[str] = ["finetune.py"] + bash_script.split() + args
with patch.object(__a , "argv" , __a ):
_UpperCamelCase : Any = argparse.ArgumentParser()
_UpperCamelCase : Any = pl.Trainer.add_argparse_args(__a )
_UpperCamelCase : str = SummarizationModule.add_model_specific_args(__a , os.getcwd() )
_UpperCamelCase : Optional[Any] = parser.parse_args()
_UpperCamelCase : List[str] = main(__a )
# Check metrics
_UpperCamelCase : Union[str, Any] = load_json(model.metrics_save_path )
_UpperCamelCase : Union[str, Any] = metrics["val"][0]
_UpperCamelCase : Dict = metrics["val"][-1]
self.assertEqual(len(metrics["val"] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __a )
self.assertGreater(last_step_stats["val_avg_gen_time"] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["val_avg_gen_time"] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["val_avg_bleu"] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_UpperCamelCase : Optional[int] = os.listdir(__a )
_UpperCamelCase : Optional[Any] = [x for x in contents if x.endswith(".ckpt" )][0]
_UpperCamelCase : Any = os.path.join(args.output_dir , __a )
_UpperCamelCase : List[str] = torch.load(__a , map_location="cpu" )
_UpperCamelCase : List[str] = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
_UpperCamelCase : List[str] = F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_UpperCamelCase : List[Any] = {
"--fp16_opt_level=O1": "",
"$MAX_LEN": 128,
"$BS": 16,
"$GAS": 1,
"$ENRO_DIR": data_dir,
"$m": "sshleifer/student_marian_en_ro_6_1",
"val_check_interval=0.25": "val_check_interval=1.0",
}
# Clean up bash script
_UpperCamelCase : List[Any] = (
(self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py" )[1].strip()
)
_UpperCamelCase : Tuple = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
_UpperCamelCase : int = bash_script.replace("--fp16 " , " " )
for k, v in env_vars_to_replace.items():
_UpperCamelCase : Tuple = bash_script.replace(__a , str(__a ) )
_UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCamelCase : int = bash_script.replace("--fp16" , "" )
_UpperCamelCase : Optional[int] = 6
_UpperCamelCase : str = (
["distillation.py"]
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
"--gpus=1",
"--learning_rate=1e-3",
F'''--num_train_epochs={epochs}''',
"--warmup_steps=10",
"--val_check_interval=1.0",
"--do_predict",
]
)
with patch.object(__a , "argv" , __a ):
_UpperCamelCase : List[Any] = argparse.ArgumentParser()
_UpperCamelCase : Dict = pl.Trainer.add_argparse_args(__a )
_UpperCamelCase : Union[str, Any] = SummarizationDistiller.add_model_specific_args(__a , os.getcwd() )
_UpperCamelCase : int = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_UpperCamelCase : str = distill_main(__a )
# Check metrics
_UpperCamelCase : Optional[int] = load_json(model.metrics_save_path )
_UpperCamelCase : List[str] = metrics["val"][0]
_UpperCamelCase : Dict = metrics["val"][-1]
assert len(metrics["val"] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __a )
# check lightning ckpt can be loaded and has a reasonable statedict
_UpperCamelCase : List[str] = os.listdir(__a )
_UpperCamelCase : Tuple = [x for x in contents if x.endswith(".ckpt" )][0]
_UpperCamelCase : str = os.path.join(args.output_dir , __a )
_UpperCamelCase : Optional[int] = torch.load(__a , map_location="cpu" )
_UpperCamelCase : Union[str, Any] = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
| 51
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext , key ) -> str | None:
    """simple docstring"""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def filter_valid_chars(ciphertext ) -> list[str]:
    """simple docstring"""
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        decoded = try_key(ciphertext , key )
        if decoded is not None:
            possibles.append(decoded )
    return possibles
def filter_common_word(possibles , common_word ) -> list[str]:
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename = "p059_cipher.txt" ) -> int:
    """simple docstring"""
    data = Path(__file__ ).parent.joinpath(filename ).read_text(encoding="utf-8" )
    ciphertext = [int(number ) for number in data.strip().split("," )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
| 1
|
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : str , __a : str ) -> Tuple:
with open(__a , encoding="utf-8" ) as input_file:
_UpperCamelCase : Dict = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
_UpperCamelCase : Dict = input_file.read()
_UpperCamelCase : List[str] = regexp.search(__a )
return match
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : str ) -> Union[str, Any]:
with open(__a , encoding="utf-8" ) as input_file:
_UpperCamelCase : str = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
_UpperCamelCase : Optional[int] = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
_UpperCamelCase : int = regexp.finditer(__a )
_UpperCamelCase : Tuple = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase : Dict = Path("./datasets" )
_UpperCamelCase : List[str] = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(__a ) ):
raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : List[str] = Path("./datasets" )
_UpperCamelCase : str = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(__a ) ):
raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 51
|
"""simple docstring"""
def print_max_activities(start , finish ) -> None:
    """simple docstring"""
    n = len(finish )
    print("The following activities are selected:" )
    # The first activity is always selected
    i = 0
    print(i , end="," )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end="," )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
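# Expected output for the sample data above (hand trace of the greedy rule,
# which assumes the activities are already sorted by finish time):
#
#     The following activities are selected:
#     0,1,3,4,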
| 51
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = "speech_to_text"
SCREAMING_SNAKE_CASE__ :Any = ["past_key_values"]
SCREAMING_SNAKE_CASE__ :int = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Any , __a : Optional[int]=1_0000 , __a : Optional[Any]=12 , __a : Optional[int]=2048 , __a : Optional[int]=4 , __a : List[Any]=6 , __a : Optional[Any]=2048 , __a : List[str]=4 , __a : Dict=0.0 , __a : Union[str, Any]=0.0 , __a : Union[str, Any]=True , __a : int=True , __a : Union[str, Any]="relu" , __a : Union[str, Any]=256 , __a : List[str]=0.1 , __a : Any=0.0 , __a : Any=0.0 , __a : List[str]=0.02 , __a : List[str]=2 , __a : Optional[int]=True , __a : int=1 , __a : Optional[int]=0 , __a : Dict=2 , __a : Tuple=6000 , __a : Optional[Any]=1024 , __a : Union[str, Any]=2 , __a : str=(5, 5) , __a : Dict=1024 , __a : List[Any]=80 , __a : Dict=1 , **__a : Optional[int] , ) -> Optional[int]:
_UpperCamelCase : List[str] = vocab_size
_UpperCamelCase : Any = d_model
_UpperCamelCase : Tuple = encoder_ffn_dim
_UpperCamelCase : List[str] = encoder_layers
_UpperCamelCase : str = encoder_attention_heads
_UpperCamelCase : List[str] = decoder_ffn_dim
_UpperCamelCase : Any = decoder_layers
_UpperCamelCase : List[str] = decoder_attention_heads
_UpperCamelCase : Union[str, Any] = dropout
_UpperCamelCase : List[str] = attention_dropout
_UpperCamelCase : Optional[int] = activation_dropout
_UpperCamelCase : int = activation_function
_UpperCamelCase : Dict = init_std
_UpperCamelCase : List[Any] = encoder_layerdrop
_UpperCamelCase : Optional[int] = decoder_layerdrop
_UpperCamelCase : List[str] = use_cache
_UpperCamelCase : str = encoder_layers
_UpperCamelCase : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCamelCase : Any = max_source_positions
_UpperCamelCase : Optional[Any] = max_target_positions
_UpperCamelCase : Optional[Any] = num_conv_layers
_UpperCamelCase : Tuple = list(__a )
_UpperCamelCase : Optional[int] = conv_channels
_UpperCamelCase : List[Any] = input_feat_per_channel
_UpperCamelCase : List[str] = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , decoder_start_token_id=__a , **__a , )
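# --- Usage sketch (hedged): assuming this mirrors transformers'
# `Speech2TextConfig`, the constructor enforces that exactly one kernel size
# is given per convolutional layer:
#
#     from transformers import Speech2TextConfig
#     Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))  # ok
#     Speech2TextConfig(num_conv_layers=3, conv_kernel_sizes=(5, 5))  # raises ValueError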
| 51
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :torch.FloatTensor
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Dict=3 , __a : Any=3 , __a : Union[str, Any]=("DownEncoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Tuple=32 , __a : int="silu" , __a : str=True , ) -> Dict:
super().__init__()
_UpperCamelCase : List[str] = layers_per_block
_UpperCamelCase : Dict = torch.nn.Convad(
__a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : int = None
_UpperCamelCase : Any = nn.ModuleList([] )
# down
_UpperCamelCase : List[str] = block_out_channels[0]
for i, down_block_type in enumerate(__a ):
_UpperCamelCase : Tuple = output_channel
_UpperCamelCase : int = block_out_channels[i]
_UpperCamelCase : int = i == len(__a ) - 1
_UpperCamelCase : Dict = get_down_block(
__a , num_layers=self.layers_per_block , in_channels=__a , out_channels=__a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , )
self.down_blocks.append(__a )
# mid
_UpperCamelCase : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# out
_UpperCamelCase : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : Any = nn.SiLU()
_UpperCamelCase : Union[str, Any] = 2 * out_channels if double_z else out_channels
_UpperCamelCase : Tuple = nn.Convad(block_out_channels[-1] , __a , 3 , padding=1 )
_UpperCamelCase : Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Dict ) -> List[str]:
_UpperCamelCase : int = x
_UpperCamelCase : Optional[int] = self.conv_in(__a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Tuple ):
def custom_forward(*__a : Any ):
return module(*__a )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , use_reentrant=__a )
# middle
_UpperCamelCase : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , use_reentrant=__a )
else:
for down_block in self.down_blocks:
_UpperCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a )
# middle
_UpperCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __a )
else:
# down
for down_block in self.down_blocks:
_UpperCamelCase : int = down_block(__a )
# middle
_UpperCamelCase : int = self.mid_block(__a )
# post-process
_UpperCamelCase : Any = self.conv_norm_out(__a )
_UpperCamelCase : Any = self.conv_act(__a )
_UpperCamelCase : Optional[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : int=3 , __a : Any=3 , __a : str=("UpDecoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Optional[int]=32 , __a : Tuple="silu" , __a : Union[str, Any]="group" , ) -> str:
super().__init__()
_UpperCamelCase : List[Any] = layers_per_block
_UpperCamelCase : Tuple = nn.Convad(
__a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = nn.ModuleList([] )
_UpperCamelCase : List[Any] = in_channels if norm_type == "spatial" else None
# mid
_UpperCamelCase : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# up
_UpperCamelCase : List[str] = list(reversed(__a ) )
_UpperCamelCase : int = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__a ):
_UpperCamelCase : int = output_channel
_UpperCamelCase : Union[str, Any] = reversed_block_out_channels[i]
_UpperCamelCase : Optional[Any] = i == len(__a ) - 1
_UpperCamelCase : Union[str, Any] = get_up_block(
__a , num_layers=self.layers_per_block + 1 , in_channels=__a , out_channels=__a , prev_output_channel=__a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , resnet_time_scale_shift=__a , )
self.up_blocks.append(__a )
_UpperCamelCase : Optional[Any] = output_channel
# out
if norm_type == "spatial":
_UpperCamelCase : Optional[int] = SpatialNorm(block_out_channels[0] , __a )
else:
_UpperCamelCase : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : str = nn.SiLU()
_UpperCamelCase : str = nn.Convad(block_out_channels[0] , __a , 3 , padding=1 )
_UpperCamelCase : Dict = False
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[Any] , __a : Union[str, Any]=None ) -> Tuple:
_UpperCamelCase : List[str] = z
_UpperCamelCase : Dict = self.conv_in(__a )
_UpperCamelCase : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Any ):
def custom_forward(*__a : Tuple ):
return module(*__a )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a , use_reentrant=__a )
_UpperCamelCase : Optional[int] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , __a , use_reentrant=__a )
else:
# middle
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a )
_UpperCamelCase : Union[str, Any] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a , __a )
else:
# middle
_UpperCamelCase : str = self.mid_block(__a , __a )
_UpperCamelCase : int = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : Any = up_block(__a , __a )
# post-process
if latent_embeds is None:
_UpperCamelCase : List[str] = self.conv_norm_out(__a )
else:
_UpperCamelCase : Optional[int] = self.conv_norm_out(__a , __a )
_UpperCamelCase : Tuple = self.conv_act(__a )
_UpperCamelCase : List[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple , __a : List[str] , __a : List[str] , __a : str=None , __a : Optional[int]="random" , __a : Any=False , __a : Optional[Any]=True ) -> List[Any]:
super().__init__()
_UpperCamelCase : Tuple = n_e
_UpperCamelCase : Tuple = vq_embed_dim
_UpperCamelCase : Union[str, Any] = beta
_UpperCamelCase : str = legacy
_UpperCamelCase : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_UpperCamelCase : Any = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
_UpperCamelCase : Dict = self.used.shape[0]
_UpperCamelCase : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCamelCase : Optional[int] = self.re_embed
_UpperCamelCase : Any = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
_UpperCamelCase : Union[str, Any] = n_e
_UpperCamelCase : List[str] = sane_index_shape
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : str = inds.shape
assert len(__a ) > 1
_UpperCamelCase : Union[str, Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[Any] = self.used.to(__a )
_UpperCamelCase : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCamelCase : Optional[Any] = match.argmax(-1 )
_UpperCamelCase : Any = match.sum(2 ) < 1
if self.unknown_index == "random":
_UpperCamelCase : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_UpperCamelCase : Dict = self.unknown_index
return new.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[int] ) -> Optional[int]:
_UpperCamelCase : int = inds.shape
assert len(__a ) > 1
_UpperCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[int] = self.used.to(__a )
if self.re_embed > self.used.shape[0]: # extra token
_UpperCamelCase : int = 0 # simply set to zero
_UpperCamelCase : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __a )
return back.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : str ) -> Optional[int]:
# reshape z -> (batch, height, width, channel) and flatten
_UpperCamelCase : List[str] = z.permute(0 , 2 , 3 , 1 ).contiguous()
_UpperCamelCase : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_UpperCamelCase : Optional[int] = torch.argmin(torch.cdist(__a , self.embedding.weight ) , dim=1 )
_UpperCamelCase : int = self.embedding(__a ).view(z.shape )
_UpperCamelCase : str = None
_UpperCamelCase : Any = None
# compute loss for embedding
if not self.legacy:
_UpperCamelCase : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_UpperCamelCase : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_UpperCamelCase : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
_UpperCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_UpperCamelCase : Tuple = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_UpperCamelCase : Dict = self.remap_to_used(__a )
_UpperCamelCase : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_UpperCamelCase : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[str] , __a : str ) -> Any:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_UpperCamelCase : str = indices.reshape(shape[0] , -1 ) # add batch axis
_UpperCamelCase : str = self.unmap_to_all(__a )
_UpperCamelCase : int = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_UpperCamelCase : Optional[int] = self.embedding(__a )
if shape is not None:
_UpperCamelCase : Tuple = z_q.view(__a )
# reshape back to match original input shape
_UpperCamelCase : Tuple = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : List[str] , __a : Optional[Any]=False ) -> int:
_UpperCamelCase : Dict = parameters
_UpperCamelCase, _UpperCamelCase : str = torch.chunk(__a , 2 , dim=1 )
_UpperCamelCase : Tuple = torch.clamp(self.logvar , -30.0 , 20.0 )
_UpperCamelCase : Union[str, Any] = deterministic
_UpperCamelCase : Dict = torch.exp(0.5 * self.logvar )
_UpperCamelCase : Any = torch.exp(self.logvar )
if self.deterministic:
_UpperCamelCase : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
_UpperCamelCase : List[Any] = randn_tensor(
self.mean.shape , generator=__a , device=self.parameters.device , dtype=self.parameters.dtype )
_UpperCamelCase : List[Any] = self.mean + self.std * sample
return x
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[str]=None ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Tuple , __a : List[str]=[1, 2, 3] ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
_UpperCamelCase : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
return self.mean
| 51
| 1
|
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits , classical_bits ) -> qiskit.result.counts.Counts:
    """simple docstring"""
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1_000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 51
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"text": Value("string" )} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"summary": Value("string" )} )
SCREAMING_SNAKE_CASE__ :str = "text"
SCREAMING_SNAKE_CASE__ :str = "summary"
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
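# --- Usage sketch (hedged): assuming this mirrors datasets' `Summarization`
# task template, `column_mapping` renames arbitrary dataset columns onto the
# canonical "text"/"summary" schema:
#
#     template = Summarization(text_column="article", summary_column="highlights")
#     template.column_mapping  # {"article": "text", "highlights": "summary"}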
| 51
| 1
|
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
_UpperCamelCase : Optional[Any] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
_UpperCamelCase : Any = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> int:
_UpperCamelCase : int = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__a ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : Any = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
_UpperCamelCase : Any = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(__a ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
_UpperCamelCase : Any = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_UpperCamelCase : Optional[int] = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
_UpperCamelCase : Optional[Any] = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_UpperCamelCase : List[Any] = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
# pass variant but use the non-variant filenames
_UpperCamelCase : int = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
_UpperCamelCase : int = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_UpperCamelCase : Tuple = "fp16"
self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase : int = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
_UpperCamelCase : int = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
# pass variant but use the non-variant filenames
_UpperCamelCase : Optional[int] = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
_UpperCamelCase : Tuple = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
_UpperCamelCase : str = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_UpperCamelCase : Optional[int] = "fp16"
self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
| 51
|
"""simple docstring"""
def matching_min_vertex_cover(graph ) -> set:
    """simple docstring"""
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph )
    # While there are still edges left, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices,
    # and then remove every edge adjacent to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
def get_edges(graph ) -> set:
    """simple docstring"""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 51
| 1
|
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n , prec=1_000 ) -> bool:
    """simple docstring"""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp) with d odd
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 51
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["OwlViTFeatureExtractor"]
lowerCamelCase__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
| 1
|
"""simple docstring"""
def lowercase__ ( lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
return int((input_a, input_a).count(0 ) != 0 )
def lowercase__ ( ) -> None:
"""simple docstring"""
assert nand_gate(0 ,0 ) == 1
assert nand_gate(0 ,1 ) == 1
assert nand_gate(1 ,0 ) == 1
assert nand_gate(1 ,1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
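# Equivalent one-liner for reference: NAND(a, b) == NOT(a AND b).
assert all(
    int(not (a and b)) == expected
    for (a, b), expected in [((0, 0), 1), ((0, 1), 1), ((1, 0), 1), ((1, 1), 0)]
)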
| 51
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowercase__ ( lowercase_ = 1_000_000 ,lowercase_ = 10 ) -> int:
"""simple docstring"""
_UpperCamelCase : defaultdict = defaultdict(lowercase_ )
for outer_width in range(3 ,(t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_UpperCamelCase : Any = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
else:
_UpperCamelCase : str = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(lowercase_ ,outer_width - 1 ,2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __a : int , __a : int , __a : float = 0 ) -> None:
_UpperCamelCase, _UpperCamelCase : Any = row, column
_UpperCamelCase : List[str] = [[default_value for c in range(__a )] for r in range(__a )]
def __str__( self : Any ) -> str:
_UpperCamelCase : Optional[Any] = F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
_UpperCamelCase : List[Any] = 0
for row_vector in self.array:
for obj in row_vector:
_UpperCamelCase : Dict = max(__a , len(str(__a ) ) )
_UpperCamelCase : Dict = F'''%{max_element_length}s'''
# Make string and return
def single_line(__a : list[float] ) -> str:
nonlocal string_format_identifier
_UpperCamelCase : Any = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(__a ) for row_vector in self.array )
return s
def __repr__( self : List[Any] ) -> str:
return str(self )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : tuple[int, int] ) -> bool:
if not (isinstance(__a , (list, tuple) ) and len(__a ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Dict , __a : tuple[int, int] ) -> Any:
assert self.validate_indicies(__a )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Tuple , __a : tuple[int, int] , __a : float ) -> None:
assert self.validate_indicies(__a )
_UpperCamelCase : List[Any] = value
def __add__( self : str , __a : Matrix ) -> Matrix:
assert isinstance(__a , __a )
assert self.row == another.row and self.column == another.column
# Add
_UpperCamelCase : Any = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_UpperCamelCase : Dict = self[r, c] + another[r, c]
return result
def __neg__( self : str ) -> Matrix:
_UpperCamelCase : List[str] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_UpperCamelCase : Tuple = -self[r, c]
return result
def __sub__( self : Any , __a : Matrix ) -> Matrix:
return self + (-another)
def __mul__( self : Dict , __a : int | float | Matrix ) -> Matrix:
if isinstance(__a , (int, float) ): # Scalar multiplication
_UpperCamelCase : List[Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_UpperCamelCase : Dict = self[r, c] * another
return result
elif isinstance(__a , __a ): # Matrix multiplication
assert self.column == another.row
_UpperCamelCase : Union[str, Any] = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
_UpperCamelCase : int = F'''Unsupported type given for another ({type(__a )})'''
raise TypeError(__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Matrix:
_UpperCamelCase : Union[str, Any] = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
_UpperCamelCase : Union[str, Any] = self[r, c]
return result
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Matrix , __a : Matrix ) -> Any:
assert isinstance(__a , __a ) and isinstance(__a , __a )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
_UpperCamelCase : int = v.transpose()
_UpperCamelCase : Optional[int] = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def lowercase__ ( ) -> None:
"""simple docstring"""
        ainv = Matrix(3 ,3 ,0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(F'''a^(-1) is {ainv}''' )
        # u, v
        u = Matrix(3 ,1 ,0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 ,1 ,0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
        print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u ,v )}''' )
def lowercase__ ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
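# Numeric sanity check of the Sherman-Morrison identity with the same u, v
# as the test above (NumPy is assumed here purely for verification):
import numpy as np

a_inv = np.eye(3)  # A = I, so A^(-1) = I, matching the identity matrix above
u_vec = np.array([[1.0], [2.0], [-3.0]])
v_vec = np.array([[4.0], [-2.0], [5.0]])
denom = 1.0 + (v_vec.T @ a_inv @ u_vec).item()  # 1 + v^T A^(-1) u = -14
sm = a_inv - (a_inv @ u_vec @ v_vec.T @ a_inv) / denom
assert np.allclose(sm, np.linalg.inv(np.eye(3) + u_vec @ v_vec.T))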
| 51
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("KEY")
lowerCamelCase__ = TypeVar("VAL")
@dataclass(frozen=_UpperCamelCase , slots=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( Generic[KEY, VAL] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :KEY
SCREAMING_SNAKE_CASE__ :VAL
class __SCREAMING_SNAKE_CASE ( _Item ):
'''simple docstring'''
def __init__( self : List[str] ) -> None:
super().__init__(__a , __a )
def __bool__( self : Dict ) -> bool:
return False
lowerCamelCase__ = _DeletedItem()
class __SCREAMING_SNAKE_CASE ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self : int , __a : int = 8 , __a : float = 0.75 ) -> None:
_UpperCamelCase : str = initial_block_size
_UpperCamelCase : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_UpperCamelCase : List[str] = capacity_factor
_UpperCamelCase : Dict = 0
def __SCREAMING_SNAKE_CASE ( self : int , __a : KEY ) -> int:
return hash(__a ) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : int ) -> int:
return (ind + 1) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : int , __a : KEY , __a : VAL ) -> bool:
_UpperCamelCase : List[Any] = self._buckets[ind]
if not stored:
_UpperCamelCase : Tuple = _Item(__a , __a )
self._len += 1
return True
elif stored.key == key:
_UpperCamelCase : Union[str, Any] = _Item(__a , __a )
return True
else:
return False
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
_UpperCamelCase : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
_UpperCamelCase : List[str] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int ) -> None:
_UpperCamelCase : Any = self._buckets
_UpperCamelCase : List[Any] = [None] * new_size
_UpperCamelCase : List[str] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __SCREAMING_SNAKE_CASE ( self : int ) -> None:
self._resize(len(self._buckets ) * 2 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None:
self._resize(len(self._buckets ) // 2 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : KEY ) -> Iterator[int]:
_UpperCamelCase : str = self._get_bucket_index(__a )
for _ in range(len(self._buckets ) ):
yield ind
_UpperCamelCase : Tuple = self._get_next_ind(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : KEY , __a : VAL ) -> None:
for ind in self._iterate_buckets(__a ):
if self._try_set(__a , __a , __a ):
break
def __setitem__( self : int , __a : KEY , __a : VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(__a , __a )
def __delitem__( self : str , __a : KEY ) -> None:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
raise KeyError(__a )
if item is _deleted:
continue
if item.key == key:
_UpperCamelCase : List[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , __a : KEY ) -> VAL:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__a )
def __len__( self : List[Any] ) -> int:
return self._len
def __iter__( self : List[str] ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self : List[str] ) -> str:
_UpperCamelCase : Optional[int] = " ,".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
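# Minimal linear-probing sketch of the collision handling the class above
# implements (standalone and illustrative; the names are assumptions):
def _probe_sketch(buckets: list, key) -> int:
    ind = hash(key) % len(buckets)
    while buckets[ind] is not None and buckets[ind][0] != key:
        ind = (ind + 1) % len(buckets)  # step to the next bucket on collision
    return ind

_slots: list = [None] * 8
for _k, _v in (("a", 1), ("b", 2)):
    _slots[_probe_sketch(_slots, _k)] = (_k, _v)
assert _slots[_probe_sketch(_slots, "a")] == ("a", 1)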
| 51
|
"""simple docstring"""
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : list[int] ) -> None:
_UpperCamelCase : Tuple = len(__a )
_UpperCamelCase : Dict = [0] * len_array
if len_array > 0:
_UpperCamelCase : Optional[Any] = array[0]
for i in range(1 , __a ):
_UpperCamelCase : Tuple = self.prefix_sum[i - 1] + array[i]
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : int , __a : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int ) -> bool:
_UpperCamelCase : int = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__a )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
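# Hand-checked illustration of the prefix-sum idea the class relies on:
# prefix[i] = array[0] + ... + array[i], so sum(array[l:r+1]) equals
# prefix[r] - prefix[l-1] (the names below are illustrative).
_data = [3, 1, 4, 1, 5]
_prefix, _running = [], 0
for _x in _data:
    _running += _x
    _prefix.append(_running)
assert _prefix == [3, 4, 8, 9, 14]
assert _prefix[4] - _prefix[1] == 4 + 1 + 5  # sum of _data[2:5]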
| 51
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Dict = "roformer"
def __init__( self : Tuple , __a : List[str]=5_0000 , __a : Union[str, Any]=None , __a : Tuple=768 , __a : Union[str, Any]=12 , __a : Optional[Any]=12 , __a : List[Any]=3072 , __a : Dict="gelu" , __a : Optional[Any]=0.1 , __a : Dict=0.1 , __a : int=1536 , __a : Union[str, Any]=2 , __a : Optional[int]=0.02 , __a : int=1e-1_2 , __a : List[str]=0 , __a : Any=False , __a : str=True , **__a : Union[str, Any] , ) -> Optional[int]:
super().__init__(pad_token_id=__a , **__a )
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size if embedding_size is None else embedding_size
_UpperCamelCase : str = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : List[str] = num_attention_heads
_UpperCamelCase : Optional[Any] = hidden_act
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Any = max_position_embeddings
_UpperCamelCase : Optional[Any] = type_vocab_size
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : int = layer_norm_eps
_UpperCamelCase : Optional[int] = rotary_value
_UpperCamelCase : int = use_cache
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCamelCase : List[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCamelCase : int = {0: "batch", 1: "sequence"}
_UpperCamelCase : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 51
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def lowercase__ ( lowercase_ ,lowercase_=7 ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[int] = None
if token is not None:
_UpperCamelCase : Optional[Any] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
_UpperCamelCase : Any = "636036"
_UpperCamelCase : Tuple = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
_UpperCamelCase : Dict = requests.get(lowercase_ ,headers=lowercase_ ).json()
return result["workflow_runs"]
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_daily_ci_runs(lowercase_ )
_UpperCamelCase : Tuple = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
_UpperCamelCase : Union[str, Any] = workflow_run["id"]
break
return workflow_run_id
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : str = get_last_daily_ci_runs(lowercase_ )
if workflow_run_id is not None:
_UpperCamelCase : int = get_artifacts_links(worflow_run_id=lowercase_ ,token=lowercase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
_UpperCamelCase : Dict = artifacts_links[artifact_name]
download_artifact(
artifact_name=lowercase_ ,artifact_url=lowercase_ ,output_dir=lowercase_ ,token=lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
get_last_daily_ci_artifacts(lowercase_ ,lowercase_ ,lowercase_ )
_UpperCamelCase : Dict = {}
for artifact_name in artifact_names:
_UpperCamelCase : Union[str, Any] = os.path.join(lowercase_ ,F'''{artifact_name}.zip''' )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : int = {}
with zipfile.ZipFile(lowercase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase_ ):
# read the file
with z.open(lowercase_ ) as f:
_UpperCamelCase : int = f.read().decode("UTF-8" )
return results
| 51
| 1
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = "altclip_text_model"
def __init__( self : List[Any] , __a : Union[str, Any]=25_0002 , __a : Optional[int]=1024 , __a : str=24 , __a : Any=16 , __a : List[str]=4096 , __a : str="gelu" , __a : List[str]=0.1 , __a : str=0.1 , __a : List[str]=514 , __a : int=1 , __a : Tuple=0.02 , __a : str=0.02 , __a : Any=1e-0_5 , __a : List[Any]=1 , __a : List[Any]=0 , __a : Optional[int]=2 , __a : str="absolute" , __a : Optional[Any]=True , __a : Tuple=768 , **__a : Optional[Any] , ) -> Optional[int]:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
_UpperCamelCase : Union[str, Any] = vocab_size
_UpperCamelCase : Optional[int] = hidden_size
_UpperCamelCase : Tuple = num_hidden_layers
_UpperCamelCase : Tuple = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Any = intermediate_size
_UpperCamelCase : Union[str, Any] = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : List[str] = max_position_embeddings
_UpperCamelCase : List[str] = type_vocab_size
_UpperCamelCase : Dict = initializer_range
_UpperCamelCase : List[str] = initializer_factor
_UpperCamelCase : Union[str, Any] = layer_norm_eps
_UpperCamelCase : Tuple = position_embedding_type
_UpperCamelCase : Optional[Any] = use_cache
_UpperCamelCase : Optional[int] = project_dim
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = "altclip_vision_model"
def __init__( self : List[str] , __a : List[str]=768 , __a : str=3072 , __a : List[Any]=512 , __a : List[str]=12 , __a : Tuple=12 , __a : Dict=3 , __a : List[str]=224 , __a : List[str]=32 , __a : List[Any]="quick_gelu" , __a : List[Any]=1e-5 , __a : List[Any]=0.0 , __a : str=0.02 , __a : Optional[Any]=1.0 , **__a : Any , ) -> Any:
super().__init__(**__a )
_UpperCamelCase : int = hidden_size
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Any = projection_dim
_UpperCamelCase : Tuple = num_hidden_layers
_UpperCamelCase : List[str] = num_attention_heads
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : Dict = patch_size
_UpperCamelCase : Any = image_size
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : List[str] = initializer_factor
_UpperCamelCase : Optional[Any] = attention_dropout
_UpperCamelCase : Union[str, Any] = layer_norm_eps
_UpperCamelCase : Dict = hidden_act
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[str] , __a : Union[str, os.PathLike] , **__a : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__a )
_UpperCamelCase, _UpperCamelCase : int = cls.get_config_dict(__a , **__a )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("model_type" ) == "altclip":
_UpperCamelCase : Tuple = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__a , **__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = "altclip"
SCREAMING_SNAKE_CASE__ :List[str] = True
def __init__( self : Optional[Any] , __a : Dict=None , __a : List[str]=None , __a : List[str]=768 , __a : Optional[int]=2.65_92 , **__a : Union[str, Any] ) -> List[Any]:
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
_UpperCamelCase : Tuple = kwargs.pop("text_config_dict" , __a )
_UpperCamelCase : Tuple = kwargs.pop("vision_config_dict" , __a )
super().__init__(**__a )
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be the same
        # in most cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
_UpperCamelCase : int = {}
# This is the complete result when using `text_config_dict`.
_UpperCamelCase : Any = AltCLIPTextConfig(**__a ).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but are different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
_UpperCamelCase : Optional[int] = (
F'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
F'''The value `text_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
_UpperCamelCase : Union[str, Any] = (
F'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
F'''value `text_config["{key}"]` will be overriden.'''
)
logger.warning(__a )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
_UpperCamelCase : int = {}
# This is the complete result when using `vision_config_dict`.
_UpperCamelCase : List[Any] = AltCLIPVisionConfig(**__a ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
_UpperCamelCase : Dict = {
str(__a ): value for key, value in _vision_config_dict["id2label"].items()
}
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but are different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
_UpperCamelCase : Optional[int] = (
F'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
F'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
_UpperCamelCase : Optional[Any] = (
F'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
F'''The value `vision_config["{key}"]` will be overriden.'''
)
logger.warning(__a )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
_UpperCamelCase : Optional[Any] = {}
logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." )
if vision_config is None:
_UpperCamelCase : Dict = {}
logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." )
_UpperCamelCase : Union[str, Any] = AltCLIPTextConfig(**__a )
_UpperCamelCase : Dict = AltCLIPVisionConfig(**__a )
_UpperCamelCase : Any = projection_dim
_UpperCamelCase : Union[str, Any] = logit_scale_init_value
_UpperCamelCase : Optional[Any] = 1.0
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[Any] , __a : AltCLIPTextConfig , __a : AltCLIPVisionConfig , **__a : List[str] ) -> Optional[Any]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
_UpperCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_UpperCamelCase : List[str] = self.text_config.to_dict()
_UpperCamelCase : int = self.vision_config.to_dict()
_UpperCamelCase : Optional[int] = self.__class__.model_type
return output
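# Typical composition via the classmethod above (a usage sketch only; it
# runs when `transformers` is installed, and the values are illustrative):
if __name__ == "__main__":
    from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

    text_cfg = AltCLIPTextConfig(hidden_size=1024)
    vision_cfg = AltCLIPVisionConfig(image_size=224)
    cfg = AltCLIPConfig.from_text_vision_configs(text_cfg, vision_cfg)
    print(cfg.projection_dim)  # 768 by default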
| 51
|
"""simple docstring"""
import math
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Any , __a : list[list[float]] , __a : list[int] ) -> int:
_UpperCamelCase : List[Any] = 0.0
_UpperCamelCase : Union[str, Any] = 0.0
for i in range(len(__a ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        # Pick the weight vector with the smaller squared distance.
        return 0 if da > db else 1
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : list[list[int | float]] , __a : list[int] , __a : int , __a : float ) -> list[list[int | float]]:
for i in range(len(__a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def lowercase__ ( ) -> None:
"""simple docstring"""
_UpperCamelCase : Optional[int] = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_UpperCamelCase : List[str] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_UpperCamelCase : List[Any] = SelfOrganizingMap()
_UpperCamelCase : int = 3
_UpperCamelCase : List[Any] = 0.5
for _ in range(lowercase_ ):
for j in range(len(lowercase_ ) ):
# training sample
_UpperCamelCase : int = training_samples[j]
# Compute the winning vector
_UpperCamelCase : Tuple = self_organizing_map.get_winner(lowercase_ ,lowercase_ )
# Update the winning vector
_UpperCamelCase : int = self_organizing_map.update(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
# classify test sample
_UpperCamelCase : Optional[int] = [0, 0, 0, 1]
_UpperCamelCase : Dict = self_organizing_map.get_winner(lowercase_ ,lowercase_ )
# results
print(F'''Clusters that the test sample belongs to : {winner}''' )
print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
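# One hand-checked step of the update rule w_j <- w_j + alpha * (x - w_j)
# (a standalone sketch; dyadic values are used so the floats stay exact):
_alpha = 0.5
_weights = [0.25, 0.5]
_sample = [1.0, 0.0]
_updated = [w + _alpha * (x - w) for w, x in zip(_weights, _sample)]
assert _updated == [0.625, 0.25]  # each weight moves halfway toward the sample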
| 51
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Any = tempfile.mkdtemp()
_UpperCamelCase : Tuple = BlipImageProcessor()
_UpperCamelCase : Tuple = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
_UpperCamelCase : str = BlipProcessor(__a , __a )
processor.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , **__a : Dict ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).tokenizer
def __SCREAMING_SNAKE_CASE ( self : str , **__a : List[Any] ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).image_processor
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
        _UpperCamelCase : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_UpperCamelCase : Dict = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase : Dict = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase : List[str] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCamelCase : Optional[Any] = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
_UpperCamelCase : Optional[Any] = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase : Dict = self.get_image_processor()
_UpperCamelCase : List[str] = self.get_tokenizer()
_UpperCamelCase : Optional[int] = BlipProcessor(tokenizer=__a , image_processor=__a )
_UpperCamelCase : str = self.prepare_image_inputs()
_UpperCamelCase : List[Any] = image_processor(__a , return_tensors="np" )
_UpperCamelCase : Tuple = processor(images=__a , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
_UpperCamelCase : Any = self.get_image_processor()
_UpperCamelCase : str = self.get_tokenizer()
_UpperCamelCase : str = BlipProcessor(tokenizer=__a , image_processor=__a )
_UpperCamelCase : Optional[int] = "lower newer"
_UpperCamelCase : Any = processor(text=__a )
_UpperCamelCase : Any = tokenizer(__a , return_token_type_ids=__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
_UpperCamelCase : Tuple = self.get_image_processor()
_UpperCamelCase : int = self.get_tokenizer()
_UpperCamelCase : int = BlipProcessor(tokenizer=__a , image_processor=__a )
_UpperCamelCase : Optional[Any] = "lower newer"
_UpperCamelCase : Optional[int] = self.prepare_image_inputs()
_UpperCamelCase : str = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
_UpperCamelCase : Any = self.get_image_processor()
_UpperCamelCase : Optional[Any] = self.get_tokenizer()
_UpperCamelCase : int = BlipProcessor(tokenizer=__a , image_processor=__a )
_UpperCamelCase : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCamelCase : Optional[int] = processor.batch_decode(__a )
_UpperCamelCase : int = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
_UpperCamelCase : str = self.get_image_processor()
_UpperCamelCase : int = self.get_tokenizer()
_UpperCamelCase : Dict = BlipProcessor(tokenizer=__a , image_processor=__a )
_UpperCamelCase : List[Any] = "lower newer"
_UpperCamelCase : Tuple = self.prepare_image_inputs()
_UpperCamelCase : Tuple = processor(text=__a , images=__a )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 51
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase__ = "src/transformers"
lowerCamelCase__ = "docs/source/en"
lowerCamelCase__ = "."
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
with open(lowercase_ ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
_UpperCamelCase : Union[str, Any] = f.readlines()
# Find the start prompt.
_UpperCamelCase : Dict = 0
while not lines[start_index].startswith(lowercase_ ):
start_index += 1
start_index += 1
_UpperCamelCase : Optional[int] = start_index
while not lines[end_index].startswith(lowercase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
lowerCamelCase__ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase__ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowerCamelCase__ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Tuple = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" ,lowercase_ )
return [m.group(0 ) for m in matches]
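# Standalone check of the camel-case splitter regex above (illustrative):
assert [
    m.group(0)
    for m in re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", "TFBertModel")
] == ["TF", "Bert", "Model"]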
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = 2 if text == "✅" or text == "❌" else len(lowercase_ )
_UpperCamelCase : Union[str, Any] = (width - text_length) // 2
_UpperCamelCase : Dict = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_UpperCamelCase : str = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_UpperCamelCase : Dict = {name: config.replace("Config" ,"" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : str = collections.defaultdict(lowercase_ )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowercase_ ):
_UpperCamelCase : List[str] = None
if attr_name.endswith("Tokenizer" ):
_UpperCamelCase : Tuple = slow_tokenizers
_UpperCamelCase : Any = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_UpperCamelCase : Optional[Any] = fast_tokenizers
_UpperCamelCase : List[str] = attr_name[:-13]
elif _re_tf_models.match(lowercase_ ) is not None:
_UpperCamelCase : List[Any] = tf_models
_UpperCamelCase : Dict = _re_tf_models.match(lowercase_ ).groups()[0]
elif _re_flax_models.match(lowercase_ ) is not None:
_UpperCamelCase : Dict = flax_models
_UpperCamelCase : Union[str, Any] = _re_flax_models.match(lowercase_ ).groups()[0]
elif _re_pt_models.match(lowercase_ ) is not None:
_UpperCamelCase : Optional[int] = pt_models
_UpperCamelCase : Any = _re_pt_models.match(lowercase_ ).groups()[0]
if lookup_dict is not None:
while len(lowercase_ ) > 0:
if attr_name in model_name_to_prefix.values():
_UpperCamelCase : Dict = True
break
# Try again after removing the last word in the name
_UpperCamelCase : List[str] = "".join(camel_case_split(lowercase_ )[:-1] )
# Let's build that table!
_UpperCamelCase : Any = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_UpperCamelCase : List[str] = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_UpperCamelCase : Union[str, Any] = [len(lowercase_ ) + 2 for c in columns]
_UpperCamelCase : Any = max([len(lowercase_ ) for name in model_names] ) + 2
# Build the table per se
_UpperCamelCase : Tuple = "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for c, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_UpperCamelCase : Union[str, Any] = {True: "✅", False: "❌"}
for name in model_names:
_UpperCamelCase : Optional[int] = model_name_to_prefix[name]
_UpperCamelCase : Tuple = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for l, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
return table
def lowercase__ ( lowercase_=False ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : str = _find_text_in_file(
filename=os.path.join(lowercase_ ,"index.md" ) ,start_prompt="<!--This table is updated automatically from the auto modules" ,end_prompt="<!-- End table-->" ,)
_UpperCamelCase : Any = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowercase_ ,"index.md" ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCamelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 51
| 1
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 ,len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
_UpperCamelCase : Optional[Any] = grid[0]
for row_n in range(1 ,len(lowercase_ ) ):
_UpperCamelCase : int = grid[row_n]
_UpperCamelCase : List[Any] = fill_row(lowercase_ ,lowercase_ )
_UpperCamelCase : List[str] = grid[row_n]
return grid[-1][-1]
def lowercase__ ( lowercase_ ,lowercase_ ) -> list:
"""simple docstring"""
current_row[0] += row_above[0]
for cell_n in range(1 ,len(lowercase_ ) ):
current_row[cell_n] += min(current_row[cell_n - 1] ,row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
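# Hand-checked example of the same row-by-row DP (standalone sketch): the
# cheapest monotone path through the grid below is 1 -> 3 -> 1 -> 1 -> 1.
_grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
_row = _grid[0][:]
for _j in range(1, len(_row)):
    _row[_j] += _row[_j - 1]  # first row: running sums
for _r in _grid[1:]:
    _row[0] += _r[0]
    for _j in range(1, len(_row)):
        _row[_j] = _r[_j] + min(_row[_j - 1], _row[_j])
assert _row[-1] == 7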
| 51
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase__ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, float]:
"""simple docstring"""
_UpperCamelCase : str = len([g for position, g in enumerate(lowercase_ ) if g == main_target[position]] )
return (item, float(lowercase_ ))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, str]:
"""simple docstring"""
_UpperCamelCase : Tuple = random.randint(0 ,len(lowercase_ ) - 1 )
_UpperCamelCase : Dict = parent_a[:random_slice] + parent_a[random_slice:]
_UpperCamelCase : Tuple = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : int = list(lowercase_ )
if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
_UpperCamelCase : int = random.choice(lowercase_ )
return "".join(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,) -> list[str]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
# Generate more children proportionally to the fitness score.
_UpperCamelCase : List[str] = int(parent_a[1] * 100 ) + 1
_UpperCamelCase : Union[str, Any] = 10 if child_n >= 10 else child_n
for _ in range(lowercase_ ):
_UpperCamelCase : Dict = population_score[random.randint(0 ,lowercase_ )][0]
_UpperCamelCase, _UpperCamelCase : Dict = crossover(parent_a[0] ,lowercase_ )
# Append new string to the population list.
pop.append(mutate(lowercase_ ,lowercase_ ) )
pop.append(mutate(lowercase_ ,lowercase_ ) )
return pop
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
_UpperCamelCase : List[str] = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(lowercase_ )
    # Verify that the target contains no genes besides the ones inside the genes variable.
_UpperCamelCase : int = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_UpperCamelCase : int = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(lowercase_ )
# Generate random starting population.
_UpperCamelCase : Union[str, Any] = []
for _ in range(lowercase_ ):
population.append("".join([random.choice(lowercase_ ) for i in range(len(lowercase_ ) )] ) )
    # Just some logs to know what the algorithm is doing.
_UpperCamelCase, _UpperCamelCase : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowercase_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_UpperCamelCase : int = [evaluate(lowercase_ ,lowercase_ ) for item in population]
# Check if there is a matching evolution.
_UpperCamelCase : Optional[Any] = sorted(lowercase_ ,key=lambda lowercase_ : x[1] ,reverse=lowercase_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of the evolution.
_UpperCamelCase : str = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowercase_ )
# Normalize population score to be between 0 and 1.
_UpperCamelCase : str = [
(item, score / len(lowercase_ )) for item, score in population_score
]
# This is selection
for i in range(lowercase_ ):
population.extend(select(population_score[int(lowercase_ )] ,lowercase_ ,lowercase_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(lowercase_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
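# Deterministic mini-demo of the crossover step in isolation (a standalone
# sketch; the seed and parent strings are illustrative):
random.seed(42)
demo_parent_a, demo_parent_b = "AAAA", "BBBB"
demo_cut = random.randint(0, len(demo_parent_a) - 1)
demo_child_a = demo_parent_a[:demo_cut] + demo_parent_b[demo_cut:]
demo_child_b = demo_parent_b[:demo_cut] + demo_parent_a[demo_cut:]
assert len(demo_child_a) == len(demo_child_b) == 4
print(demo_child_a, demo_child_b)  # where the cut lands depends on the seeded RNG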
| 51
| 1
|
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 51
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = ["model.decoder.embed_positions.weights"]
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
if "emb" in name:
_UpperCamelCase : List[str] = name.replace("emb" ,"model.decoder.embed_tokens" )
if "transformer" in name:
_UpperCamelCase : Optional[int] = name.replace("transformer" ,"model.decoder" )
if "cross_attention" in name:
_UpperCamelCase : Optional[int] = name.replace("cross_attention" ,"encoder_attn" )
if "linear1" in name:
_UpperCamelCase : Optional[Any] = name.replace("linear1" ,"fc1" )
if "linear2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("linear2" ,"fc2" )
if "norm1" in name:
_UpperCamelCase : Optional[Any] = name.replace("norm1" ,"self_attn_layer_norm" )
if "norm_cross" in name:
_UpperCamelCase : Dict = name.replace("norm_cross" ,"encoder_attn_layer_norm" )
if "norm2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("norm2" ,"final_layer_norm" )
if "out_norm" in name:
_UpperCamelCase : Union[str, Any] = name.replace("out_norm" ,"model.decoder.layer_norm" )
if "linears" in name:
_UpperCamelCase : List[str] = name.replace("linears" ,"lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
_UpperCamelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" ,"enc_to_dec_proj" )
return name
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple[Dict, Dict]:
"""simple docstring"""
_UpperCamelCase : str = list(state_dict.keys() )
_UpperCamelCase : Optional[Any] = {}
for key in keys:
_UpperCamelCase : Optional[int] = state_dict.pop(lowercase_ )
_UpperCamelCase : List[Any] = rename_keys(lowercase_ )
if "in_proj_weight" in key:
# split fused qkv proj
_UpperCamelCase : Tuple = val[:hidden_size, :]
_UpperCamelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
_UpperCamelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_UpperCamelCase : Optional[Any] = val
else:
_UpperCamelCase : List[str] = val
return state_dict, enc_dec_proj_state_dict
def lowercase__ ( lowercase_ ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
_UpperCamelCase : List[Any] = 1_024
_UpperCamelCase : List[str] = 24
_UpperCamelCase : Any = 16
elif checkpoint == "medium":
_UpperCamelCase : Tuple = 1_536
_UpperCamelCase : Dict = 48
_UpperCamelCase : Tuple = 24
elif checkpoint == "large":
_UpperCamelCase : int = 2_048
_UpperCamelCase : Optional[int] = 48
_UpperCamelCase : Dict = 32
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
_UpperCamelCase : str = MusicgenDecoderConfig(
hidden_size=lowercase_ ,ffn_dim=hidden_size * 4 ,num_hidden_layers=lowercase_ ,num_attention_heads=lowercase_ ,)
return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """Convert a fairseq MusicGen checkpoint into the HF MusicgenForConditionalGeneration format."""
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass: 2 sequences of 4 codebooks each are flattened to 8 decoder rows
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2_048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2_048
    model.generation_config.pad_token_id = 2_048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
| 51
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700
|
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
| 51
| 0
|
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    """Skip the wrapped test when the metric needs fairseq and it is not installed."""
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_if_metric_requires_transformers(test_case):
    """Skip the wrapped test when the metric needs transformers and it is not installed."""
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_on_windows(test_case):
    """Skip the wrapped test for metrics that are not supported on Windows."""
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    """Collect the metric names shipped in the local ./metrics directory."""
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Tuple ) -> Dict:
_UpperCamelCase : List[str] = "[...]"
_UpperCamelCase : int = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , lowerCAmelCase__ ) ).module_path )
_UpperCamelCase : List[str] = datasets.load.import_main_class(metric_module.__name__ , dataset=lowerCAmelCase__ )
# check parameters
_UpperCamelCase : Union[str, Any] = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(lowerCAmelCase__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
_UpperCamelCase : Any = doctest.testmod(lowerCAmelCase__ , verbose=lowerCAmelCase__ , raise_on_error=lowerCAmelCase__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
    @slow
    def test_load_real_metric(self, metric_name):
        _ = "[...]"  # unused placeholder retained from the original snippet
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" ,"" ,"" ) # handle pytest cli flags
class __SCREAMING_SNAKE_CASE ( _snake_case ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Any ) -> int:
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
_UpperCamelCase : Tuple = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
import torch
def bert_cos_score_idf(lowercase_ ,lowercase_ ,*lowercase_ ,**lowercase_ ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(__A ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
_UpperCamelCase : Optional[int] = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
def load_from_checkpoint(lowercase_ ):
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Optional[int] , *__a : Dict , **__a : str ) -> Dict:
assert len(lowerCAmelCase__ ) == 2
_UpperCamelCase : Optional[int] = [0.19, 0.92]
return scores, sum(lowerCAmelCase__ ) / len(lowerCAmelCase__ )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("comet.download_model" ) as mock_download_model:
_UpperCamelCase : Optional[Any] = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
_UpperCamelCase : List[Any] = load_from_checkpoint
yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 701
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    """Turn a test file path into the dotted module path used by importlib."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
def get_test_module(test_file):
    """Import and return the test module corresponding to `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    """Collect all `*ModelTester` classes defined in the test module."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    """Collect all test classes (those with a non-empty `all_model_classes`) in the test module."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    """Collect all model classes covered by the test classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """Return the model tester class used by a test class, if any."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    """Collect the test classes in `test_file` that cover `model_class`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    """Collect the model tester classes in `test_file` that cover `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class to the test classes that cover it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class to the tester classes that cover it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    """Make the mapping objects above JSON-serializable by replacing classes with their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
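# Illustrative usage (the test file path is hypothetical):
#     mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
#     print(to_json(mapping))  # e.g. {"BertModel": ["BertModelTester"], ...}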
| 51
| 0
|
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_DESCRIPTION = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_KWARGS_DESCRIPTION = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    """Compute per-question macro-F1, pooled F1 and exact match for MultiRC predictions."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 702
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 51
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 703
|
"""simple docstring"""
lowerCamelCase__ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 51
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-1_2, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 704
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
_UpperCamelCase : Optional[Any] = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase : Optional[Any] = RealmConfig(num_block_records=self.num_block_records )
return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 51
| 0
|
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using 6k +/- 1 trial division."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
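# Illustrative walk-through: for number = 97, sqrt(97) ~ 9.85, so the loop only trials
# i = 5 (checking divisors 5 and 7); neither divides 97, hence 97 is reported prime.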
class Test(unittest.TestCase):
    def test_primes(self):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
self.assertFalse(
is_prime(0 ) , "Zero doesn't have any positive factors, primes must have exactly two." , )
self.assertFalse(
is_prime(1 ) , "One only has 1 positive factor, primes must have exactly two." , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 705
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None):
    """Build the standard LED input dict, deriving masks from the pad token where absent."""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
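# Illustrative: with pad_token_id = 1, an input row like [5, 6, 1] yields the derived
# attention_mask [1, 1, 0], i.e. padding positions are masked out automatically.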
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 51
| 0
|
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(self, backbone_config: Optional[Dict] = None, feature_size: int = 256, mask_feature_size: int = 256, hidden_dim: int = 256, encoder_feedforward_dim: int = 1024, activation_function: str = "relu", encoder_layers: int = 6, decoder_layers: int = 10, num_attention_heads: int = 8, dropout: float = 0.0, dim_feedforward: int = 2048, pre_norm: bool = False, enforce_input_projection: bool = False, common_stride: int = 4, ignore_value: int = 255, num_queries: int = 100, no_object_weight: float = 0.1, class_weight: float = 2.0, mask_weight: float = 5.0, dice_weight: float = 5.0, train_num_points: int = 1_2544, oversample_ratio: float = 3.0, importance_sample_ratio: float = 0.75, init_std: float = 0.02, init_xavier_std: float = 1.0, use_auxiliary_loss: bool = True, feature_strides: List[int] = [4, 8, 16, 32], output_auxiliary_logits: bool = None, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=False, out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)
    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a Mask2FormerConfig from a pre-trained backbone configuration."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 706
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
_UpperCamelCase : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Dict = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : int = RoCBertBasicTokenizer(do_lower_case=__a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_UpperCamelCase : Any = {}
        for i, token in enumerate(vocab_tokens ):
_UpperCamelCase : str = i
_UpperCamelCase : Optional[int] = RoCBertWordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
_UpperCamelCase : Tuple = self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                _UpperCamelCase : int = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                _UpperCamelCase : Union[str, Any] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                _UpperCamelCase : Optional[Any] = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                _UpperCamelCase : List[Any] = tokenizer_r.do_lower_case if hasattr(tokenizer_r , "do_lower_case" ) else False
_UpperCamelCase : Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = ["的", "人", "有"]
_UpperCamelCase : int = "".join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = True
                _UpperCamelCase : Any = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                _UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
_UpperCamelCase : int = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Any = False
                _UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                _UpperCamelCase : Any = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
_UpperCamelCase : Any = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Any = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCamelCase : Any = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        _UpperCamelCase : Optional[int] = tokenizer.encode("你好" , add_special_tokens=False )
        _UpperCamelCase : Dict = tokenizer.encode("你是谁" , add_special_tokens=False )
        _UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(text )
        _UpperCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
        _UpperCamelCase : Optional[Any] = self.get_tokenizers(do_lower_case=False )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : int = "你好,你是谁"
_UpperCamelCase : Any = tokenizer.tokenize(__a )
_UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__a )
_UpperCamelCase : List[str] = tokenizer.convert_tokens_to_shape_ids(__a )
_UpperCamelCase : Any = tokenizer.convert_tokens_to_pronunciation_ids(__a )
_UpperCamelCase : Optional[int] = tokenizer.prepare_for_model(
__a , __a , __a , add_special_tokens=__a )
_UpperCamelCase : Tuple = tokenizer.encode_plus(__a , add_special_tokens=__a )
self.assertEqual(__a , __a )
| 51
| 0
|
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCamelCase__ = "src/diffusers"
lowerCamelCase__ = "."
# This is to make sure the diffusers module imported is the one in the repo.
lowerCamelCase__ = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowerCamelCase__ = spec.loader.load_module()
def _should_continue(line, indent) -> bool:
    """simple docstring"""
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
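# Hedged note: the regex above lets the scan continue over a signature's closing
# ")" line (e.g. ") -> int:"), which would otherwise stop the indent-based walk.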
def find_code_in_diffusers(object_name) -> str:
    """simple docstring"""
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")
    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
lowerCamelCase__ = re.compile(R"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
lowerCamelCase__ = re.compile(R"^\s*(\S+)->(\S+)(\s+.*|$)")
lowerCamelCase__ = re.compile(R"<FILL\s+[^>]*>")
def get_indent(code) -> str:
    """simple docstring"""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code) -> str:
    """simple docstring"""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """simple docstring"""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see an `# End copy` comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies.
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    """simple docstring"""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCamelCase__ = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 707
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = "yolos"
def __init__( self : Dict , __a : Optional[Any]=768 , __a : List[Any]=12 , __a : Any=12 , __a : List[Any]=3072 , __a : Optional[int]="gelu" , __a : Dict=0.0 , __a : Optional[Any]=0.0 , __a : Any=0.02 , __a : Optional[int]=1e-1_2 , __a : List[Any]=[512, 864] , __a : List[str]=16 , __a : str=3 , __a : Optional[Any]=True , __a : Optional[Any]=100 , __a : List[str]=True , __a : Any=False , __a : List[str]=1 , __a : str=5 , __a : Optional[Any]=2 , __a : Tuple=5 , __a : Any=2 , __a : Union[str, Any]=0.1 , **__a : List[str] , ) -> List[str]:
super().__init__(**__a )
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : Tuple = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : Any = qkv_bias
_UpperCamelCase : str = num_detection_tokens
_UpperCamelCase : str = use_mid_position_embeddings
_UpperCamelCase : List[str] = auxiliary_loss
# Hungarian matcher
_UpperCamelCase : List[Any] = class_cost
_UpperCamelCase : int = bbox_cost
_UpperCamelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCamelCase : List[Any] = bbox_loss_coefficient
_UpperCamelCase : str = giou_loss_coefficient
_UpperCamelCase : Dict = eos_coefficient
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> float:
return 1e-4
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return 12
| 51
| 0
|
"""simple docstring"""
import math
import sys
def minimum_squares_to_represent_a_number(number) -> int:
    """simple docstring"""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer, answer)
        answers[i] = answer
    return answers[number]
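# Hedged check: by Lagrange's four-square theorem the answer never exceeds 4;
# 12 = 4 + 4 + 4 needs three squares, while 13 = 4 + 9 needs only two.
assert minimum_squares_to_represent_a_number(12) == 3
assert minimum_squares_to_represent_a_number(13) == 2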
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """simple docstring"""
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
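# Hedged round-trip check: XOR-decoding with the key that produced a ciphertext
# recovers the plaintext (every character of "hello" lies in VALID_CHARS).
_plain = "hello"
_key = (1, 2, 3)
_cipher = [ord(char) ^ k for char, k in zip(_plain, cycle(_key))]
assert try_key(_cipher, _key) == _plain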
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """simple docstring"""
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]
def lowercase__ ( lowercase_ = "p059_cipher.txt" ) -> int:
"""simple docstring"""
_UpperCamelCase : list[int]
_UpperCamelCase : list[str]
_UpperCamelCase : str
_UpperCamelCase : str
_UpperCamelCase : str = Path(lowercase_ ).parent.joinpath(lowercase_ ).read_text(encoding="utf-8" )
_UpperCamelCase : Optional[Any] = [int(lowercase_ ) for number in data.strip().split("," )]
_UpperCamelCase : List[str] = filter_valid_chars(lowercase_ )
for common_word in COMMON_WORDS:
_UpperCamelCase : Union[str, Any] = filter_common_word(lowercase_ ,lowercase_ )
if len(lowercase_ ) == 1:
break
_UpperCamelCase : Union[str, Any] = possibles[0]
return sum(ord(lowercase_ ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
| 0
|
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """simple docstring"""
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
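# Hedged example: ["2", "3", "+", "4", "*"] is (2 + 3) * 4 in infix, i.e. 20.
assert evaluate_postfix(["2", "3", "+", "4", "*"]) == 20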
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709
|
"""simple docstring"""
def print_max_activities(start: list, finish: list) -> None:
    """simple docstring"""
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
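# Hedged trace for the sample data below: the greedy pass prints "0,1,3,4,",
# taking an activity whenever its start is >= the finish of the last selection.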
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = [1, 3, 0, 5, 8, 5]
lowerCamelCase__ = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 51
| 0
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    """simple docstring"""
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
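# Hedged check: for n = 10 the square of the sum is 55**2 = 3025 and the sum of
# squares is 385, so the difference is 2640 (the Project Euler 6 example).
assert solution(10) == 2640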
if __name__ == "__main__":
print(f"""{solution() = }""")
| 710
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :torch.FloatTensor
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Dict=3 , __a : Any=3 , __a : Union[str, Any]=("DownEncoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Tuple=32 , __a : int="silu" , __a : str=True , ) -> Dict:
super().__init__()
_UpperCamelCase : List[str] = layers_per_block
_UpperCamelCase : Dict = torch.nn.Convad(
__a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : int = None
_UpperCamelCase : Any = nn.ModuleList([] )
# down
_UpperCamelCase : List[str] = block_out_channels[0]
for i, down_block_type in enumerate(__a ):
_UpperCamelCase : Tuple = output_channel
_UpperCamelCase : int = block_out_channels[i]
_UpperCamelCase : int = i == len(__a ) - 1
_UpperCamelCase : Dict = get_down_block(
__a , num_layers=self.layers_per_block , in_channels=__a , out_channels=__a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , )
self.down_blocks.append(__a )
# mid
_UpperCamelCase : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# out
_UpperCamelCase : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : Any = nn.SiLU()
_UpperCamelCase : Union[str, Any] = 2 * out_channels if double_z else out_channels
_UpperCamelCase : Tuple = nn.Convad(block_out_channels[-1] , __a , 3 , padding=1 )
_UpperCamelCase : Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Dict ) -> List[str]:
_UpperCamelCase : int = x
_UpperCamelCase : Optional[int] = self.conv_in(__a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Tuple ):
def custom_forward(*__a : Any ):
return module(*__a )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , use_reentrant=__a )
# middle
_UpperCamelCase : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , use_reentrant=__a )
else:
for down_block in self.down_blocks:
_UpperCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a )
# middle
_UpperCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __a )
else:
# down
for down_block in self.down_blocks:
_UpperCamelCase : int = down_block(__a )
# middle
_UpperCamelCase : int = self.mid_block(__a )
# post-process
_UpperCamelCase : Any = self.conv_norm_out(__a )
_UpperCamelCase : Any = self.conv_act(__a )
_UpperCamelCase : Optional[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : int=3 , __a : Any=3 , __a : str=("UpDecoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Optional[int]=32 , __a : Tuple="silu" , __a : Union[str, Any]="group" , ) -> str:
super().__init__()
_UpperCamelCase : List[Any] = layers_per_block
_UpperCamelCase : Tuple = nn.Convad(
__a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = nn.ModuleList([] )
_UpperCamelCase : List[Any] = in_channels if norm_type == "spatial" else None
# mid
_UpperCamelCase : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# up
_UpperCamelCase : List[str] = list(reversed(__a ) )
_UpperCamelCase : int = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__a ):
_UpperCamelCase : int = output_channel
_UpperCamelCase : Union[str, Any] = reversed_block_out_channels[i]
_UpperCamelCase : Optional[Any] = i == len(__a ) - 1
_UpperCamelCase : Union[str, Any] = get_up_block(
__a , num_layers=self.layers_per_block + 1 , in_channels=__a , out_channels=__a , prev_output_channel=__a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , resnet_time_scale_shift=__a , )
self.up_blocks.append(__a )
_UpperCamelCase : Optional[Any] = output_channel
# out
if norm_type == "spatial":
_UpperCamelCase : Optional[int] = SpatialNorm(block_out_channels[0] , __a )
else:
_UpperCamelCase : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : str = nn.SiLU()
_UpperCamelCase : str = nn.Convad(block_out_channels[0] , __a , 3 , padding=1 )
_UpperCamelCase : Dict = False
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[Any] , __a : Union[str, Any]=None ) -> Tuple:
_UpperCamelCase : List[str] = z
_UpperCamelCase : Dict = self.conv_in(__a )
_UpperCamelCase : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Any ):
def custom_forward(*__a : Tuple ):
return module(*__a )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a , use_reentrant=__a )
_UpperCamelCase : Optional[int] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , __a , use_reentrant=__a )
else:
# middle
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a )
_UpperCamelCase : Union[str, Any] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a , __a )
else:
# middle
_UpperCamelCase : str = self.mid_block(__a , __a )
_UpperCamelCase : int = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : Any = up_block(__a , __a )
# post-process
if latent_embeds is None:
_UpperCamelCase : List[str] = self.conv_norm_out(__a )
else:
_UpperCamelCase : Optional[int] = self.conv_norm_out(__a , __a )
_UpperCamelCase : Tuple = self.conv_act(__a )
_UpperCamelCase : List[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple , __a : List[str] , __a : List[str] , __a : str=None , __a : Optional[int]="random" , __a : Any=False , __a : Optional[Any]=True ) -> List[Any]:
super().__init__()
_UpperCamelCase : Tuple = n_e
_UpperCamelCase : Tuple = vq_embed_dim
_UpperCamelCase : Union[str, Any] = beta
_UpperCamelCase : str = legacy
_UpperCamelCase : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_UpperCamelCase : Any = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
_UpperCamelCase : Dict = self.used.shape[0]
_UpperCamelCase : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCamelCase : Optional[int] = self.re_embed
_UpperCamelCase : Any = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
_UpperCamelCase : Union[str, Any] = n_e
_UpperCamelCase : List[str] = sane_index_shape
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : str = inds.shape
assert len(__a ) > 1
_UpperCamelCase : Union[str, Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[Any] = self.used.to(__a )
_UpperCamelCase : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCamelCase : Optional[Any] = match.argmax(-1 )
_UpperCamelCase : Any = match.sum(2 ) < 1
if self.unknown_index == "random":
_UpperCamelCase : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_UpperCamelCase : Dict = self.unknown_index
return new.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[int] ) -> Optional[int]:
_UpperCamelCase : int = inds.shape
assert len(__a ) > 1
_UpperCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[int] = self.used.to(__a )
if self.re_embed > self.used.shape[0]: # extra token
_UpperCamelCase : int = 0 # simply set to zero
_UpperCamelCase : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __a )
return back.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : str ) -> Optional[int]:
# reshape z -> (batch, height, width, channel) and flatten
_UpperCamelCase : List[str] = z.permute(0 , 2 , 3 , 1 ).contiguous()
_UpperCamelCase : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_UpperCamelCase : Optional[int] = torch.argmin(torch.cdist(__a , self.embedding.weight ) , dim=1 )
_UpperCamelCase : int = self.embedding(__a ).view(z.shape )
_UpperCamelCase : str = None
_UpperCamelCase : Any = None
# compute loss for embedding
if not self.legacy:
_UpperCamelCase : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_UpperCamelCase : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_UpperCamelCase : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
_UpperCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_UpperCamelCase : Tuple = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_UpperCamelCase : Dict = self.remap_to_used(__a )
_UpperCamelCase : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_UpperCamelCase : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[str] , __a : str ) -> Any:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_UpperCamelCase : str = indices.reshape(shape[0] , -1 ) # add batch axis
_UpperCamelCase : str = self.unmap_to_all(__a )
_UpperCamelCase : int = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_UpperCamelCase : Optional[int] = self.embedding(__a )
if shape is not None:
_UpperCamelCase : Tuple = z_q.view(__a )
# reshape back to match original input shape
_UpperCamelCase : Tuple = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : List[str] , __a : Optional[Any]=False ) -> int:
_UpperCamelCase : Dict = parameters
_UpperCamelCase, _UpperCamelCase : str = torch.chunk(__a , 2 , dim=1 )
_UpperCamelCase : Tuple = torch.clamp(self.logvar , -30.0 , 20.0 )
_UpperCamelCase : Union[str, Any] = deterministic
_UpperCamelCase : Dict = torch.exp(0.5 * self.logvar )
_UpperCamelCase : Any = torch.exp(self.logvar )
if self.deterministic:
_UpperCamelCase : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[torch.Generator] = None ) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has the same dtype
_UpperCamelCase : List[Any] = randn_tensor(
self.mean.shape , generator=__a , device=self.parameters.device , dtype=self.parameters.dtype )
_UpperCamelCase : List[Any] = self.mean + self.std * sample
return x
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[str]=None ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Tuple , __a : List[str]=[1, 2, 3] ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
_UpperCamelCase : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
return self.mean
| 51
| 0
|
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    """simple docstring"""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """simple docstring"""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
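# Hedged sanity check: at t=1 spherical interpolation returns the second vector
# (and at t=0 the first), up to floating point error.
assert np.allclose(slerp(1.0, np.array([1.0, 0.0]), np.array([0.0, 1.0])), [0.0, 1.0])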
def spherical_dist_loss(x, y):
    """simple docstring"""
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
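# Hedged note: 2 * arcsin(||x - y|| / 2) ** 2 equals theta**2 / 2 for the angle
# theta between the unit vectors, so identical inputs give 0 and orthogonal
# inputs pi**2 / 8.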
def set_requires_grad(model, value):
    """simple docstring"""
    for param in model.parameters():
        param.requires_grad = value
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
def __init__( self : List[Any] , __a : AutoencoderKL , __a : CLIPTextModel , __a : CLIPModel , __a : CLIPTokenizer , __a : UNetaDConditionModel , __a : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __a : CLIPFeatureExtractor , __a : List[str]=None , __a : Dict=None , __a : int=None , ) -> Optional[int]:
super().__init__()
self.register_modules(
vae=__A , text_encoder=__A , clip_model=__A , tokenizer=__A , unet=__A , scheduler=__A , feature_extractor=__A , coca_model=__A , coca_tokenizer=__A , coca_transform=__A , )
_UpperCamelCase : Any = (
feature_extractor.size
if isinstance(feature_extractor.size , __A )
else feature_extractor.size["shortest_edge"]
)
_UpperCamelCase : str = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , __A )
set_requires_grad(self.clip_model , __A )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Union[str, int]] = "auto" ) -> List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_UpperCamelCase : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__A )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
self.enable_attention_slicing(__A )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
set_requires_grad(self.vae , __A )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
set_requires_grad(self.vae , __A )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
set_requires_grad(self.unet , __A )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
set_requires_grad(self.unet , __A )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Tuple , __a : Tuple , __a : List[str] ) -> int:
# get the original timestep using init_timestep
_UpperCamelCase : Union[str, Any] = min(int(num_inference_steps * strength ) , __A )
_UpperCamelCase : int = max(num_inference_steps - init_timestep , 0 )
_UpperCamelCase : Union[str, Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Dict , __a : List[Any] , __a : Optional[Any]=None ) -> str:
if not isinstance(__A , torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(__A )}''' )
_UpperCamelCase : List[str] = image.to(device=__A , dtype=__A )
if isinstance(__A , __A ):
_UpperCamelCase : str = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__A )
]
_UpperCamelCase : List[str] = torch.cat(__A , dim=0 )
else:
_UpperCamelCase : Union[str, Any] = self.vae.encode(__A ).latent_dist.sample(__A )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_UpperCamelCase : int = 0.1_82_15 * init_latents
_UpperCamelCase : int = init_latents.repeat_interleave(__A , dim=0 )
_UpperCamelCase : List[Any] = randn_tensor(init_latents.shape , generator=__A , device=__A , dtype=__A )
# get latents
_UpperCamelCase : Union[str, Any] = self.scheduler.add_noise(__A , __A , __A )
_UpperCamelCase : Union[str, Any] = init_latents
return latents
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[int] ) -> Optional[int]:
_UpperCamelCase : str = self.coca_transform(__A ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_UpperCamelCase : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_UpperCamelCase : int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Tuple , __a : Tuple ) -> List[str]:
_UpperCamelCase : Optional[int] = self.feature_extractor.preprocess(__A )
_UpperCamelCase : Optional[Any] = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
_UpperCamelCase : Dict = self.clip_model.get_image_features(__A )
_UpperCamelCase : Any = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__A )
_UpperCamelCase : List[str] = image_embeddings_clip.repeat_interleave(__A , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __SCREAMING_SNAKE_CASE ( self : str , __a : List[str] , __a : Union[str, Any] , __a : List[Any] , __a : Dict , __a : List[Any] , __a : Union[str, Any] , __a : Union[str, Any] , ) -> Union[str, Any]:
_UpperCamelCase : Dict = latents.detach().requires_grad_()
_UpperCamelCase : Dict = self.scheduler.scale_model_input(__A , __A )
# predict the noise residual
_UpperCamelCase : int = self.unet(__A , __A , encoder_hidden_states=__A ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_UpperCamelCase : List[str] = self.scheduler.alphas_cumprod[timestep]
_UpperCamelCase : Union[str, Any] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCamelCase : List[str] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_UpperCamelCase : str = torch.sqrt(__A )
_UpperCamelCase : List[Any] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , __A ):
_UpperCamelCase : List[str] = self.scheduler.sigmas[index]
_UpperCamelCase : Optional[Any] = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_UpperCamelCase : List[Any] = 1 / 0.1_82_15 * sample
_UpperCamelCase : Any = self.vae.decode(__A ).sample
_UpperCamelCase : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCamelCase : str = transforms.Resize(self.feature_extractor_size )(__A )
_UpperCamelCase : Optional[int] = self.normalize(__A ).to(latents.dtype )
_UpperCamelCase : Any = self.clip_model.get_image_features(__A )
_UpperCamelCase : Union[str, Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__A )
_UpperCamelCase : Union[str, Any] = spherical_dist_loss(__A , __A ).mean() * clip_guidance_scale
_UpperCamelCase : str = -torch.autograd.grad(__A , __A )[0]
if isinstance(self.scheduler , __A ):
_UpperCamelCase : List[str] = latents.detach() + grads * (sigma**2)
_UpperCamelCase : Optional[Any] = noise_pred_original
else:
_UpperCamelCase : List[Any] = noise_pred_original - torch.sqrt(__A ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : Optional[int] , __a : Union[torch.FloatTensor, PIL.Image.Image] , __a : Union[torch.FloatTensor, PIL.Image.Image] , __a : Optional[str] = None , __a : Optional[str] = None , __a : Optional[int] = 512 , __a : Optional[int] = 512 , __a : float = 0.6 , __a : Optional[int] = 50 , __a : Optional[float] = 7.5 , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[float] = 100 , __a : Optional[torch.Generator] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : float = 0.8 , __a : float = 0.1 , __a : float = 0.1 , ) -> Optional[int]:
if isinstance(__A , __A ) and len(__A ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(__A )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(__A , torch.Generator ) and batch_size > 1:
_UpperCamelCase : List[str] = [generator] + [None] * (batch_size - 1)
_UpperCamelCase : Dict = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
_UpperCamelCase : str = [x[0] for x in coca_is_none if x[1]]
_UpperCamelCase : List[Any] = ", ".join(__A )
        # generate prompts with the CoCa model if prompt is None
if content_prompt is None:
if len(__A ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_UpperCamelCase : List[str] = self.get_image_description(__A )
if style_prompt is None:
if len(__A ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_UpperCamelCase : List[str] = self.get_image_description(__A )
# get prompt text embeddings for content and style
_UpperCamelCase : str = self.tokenizer(
__A , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=__A , return_tensors="pt" , )
_UpperCamelCase : Optional[int] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_UpperCamelCase : Optional[int] = self.tokenizer(
__A , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=__A , return_tensors="pt" , )
_UpperCamelCase : List[str] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_UpperCamelCase : Optional[int] = slerp(__A , __A , __A )
# duplicate text embeddings for each generation per prompt
_UpperCamelCase : Optional[int] = text_embeddings.repeat_interleave(__A , dim=0 )
# set timesteps
_UpperCamelCase : List[str] = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_UpperCamelCase : Union[str, Any] = {}
if accepts_offset:
_UpperCamelCase : Dict = 1
self.scheduler.set_timesteps(__A , **__A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_UpperCamelCase : Any = self.get_timesteps(__A , __A , self.device )
_UpperCamelCase : Optional[int] = timesteps[:1].repeat(__A )
# Preprocess image
_UpperCamelCase : int = preprocess(__A , __A , __A )
_UpperCamelCase : Optional[int] = self.prepare_latents(
__A , __A , __A , text_embeddings.dtype , self.device , __A )
_UpperCamelCase : Tuple = preprocess(__A , __A , __A )
_UpperCamelCase : Optional[int] = self.prepare_latents(
__A , __A , __A , text_embeddings.dtype , self.device , __A )
_UpperCamelCase : Tuple = slerp(__A , __A , __A )
if clip_guidance_scale > 0:
_UpperCamelCase : Dict = self.get_clip_image_embeddings(__A , __A )
_UpperCamelCase : Dict = self.get_clip_image_embeddings(__A , __A )
_UpperCamelCase : Any = slerp(
__A , __A , __A )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_UpperCamelCase : List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_UpperCamelCase : Union[str, Any] = content_text_input.input_ids.shape[-1]
_UpperCamelCase : Tuple = self.tokenizer([""] , padding="max_length" , max_length=__A , return_tensors="pt" )
_UpperCamelCase : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_UpperCamelCase : str = uncond_embeddings.repeat_interleave(__A , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_UpperCamelCase : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_UpperCamelCase : List[str] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_UpperCamelCase : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_UpperCamelCase : List[Any] = torch.randn(__A , generator=__A , device="cpu" , dtype=__A ).to(
self.device )
else:
_UpperCamelCase : Any = torch.randn(__A , generator=__A , device=self.device , dtype=__A )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_UpperCamelCase : str = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_UpperCamelCase : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCamelCase : str = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCamelCase : Any = {}
if accepts_eta:
_UpperCamelCase : List[str] = eta
# check if the scheduler accepts generator
_UpperCamelCase : str = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_UpperCamelCase : str = generator
with self.progress_bar(total=__A ):
for i, t in enumerate(__A ):
# expand the latents if we are doing classifier free guidance
_UpperCamelCase : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCamelCase : Union[str, Any] = self.scheduler.scale_model_input(__A , __A )
# predict the noise residual
_UpperCamelCase : Optional[Any] = self.unet(__A , __A , encoder_hidden_states=__A ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_UpperCamelCase : Any = noise_pred.chunk(2 )
_UpperCamelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_UpperCamelCase : Optional[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_UpperCamelCase : Dict = self.cond_fn(
__A , __A , __A , __A , __A , __A , __A , )
# compute the previous noisy sample x_t -> x_t-1
_UpperCamelCase : Any = self.scheduler.step(__A , __A , __A , **__A ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_UpperCamelCase : List[Any] = 1 / 0.1_82_15 * latents
_UpperCamelCase : Dict = self.vae.decode(__A ).sample
_UpperCamelCase : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCamelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCamelCase : List[str] = self.numpy_to_pil(__A )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__A , nsfw_content_detected=__A )
| 711
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"text": Value("string" )} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"summary": Value("string" )} )
SCREAMING_SNAKE_CASE__ :str = "text"
SCREAMING_SNAKE_CASE__ :str = "summary"
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 51
| 0
|
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
lowerCamelCase__ = {
"n_samples": 64,
"horizon": 32,
"num_inference_steps": 20,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
lowerCamelCase__ = "hopper-medium-v2"
lowerCamelCase__ = gym.make(env_name)
lowerCamelCase__ = ValueGuidedRLPipeline.from_pretrained(
"bglick13/hopper-medium-v2-value-function-hor32",
env=env,
)
env.seed(0)
lowerCamelCase__ = env.reset()
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ = 1000
lowerCamelCase__ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
lowerCamelCase__ = pipeline(obs, planning_horizon=32)
# execute action in environment
lowerCamelCase__ = env.step(denorm_actions)
lowerCamelCase__ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
f""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
lowerCamelCase__ = next_observation
except KeyboardInterrupt:
pass
print(f"""Total reward: {total_reward}""")
| 712
|
"""simple docstring"""
def matching_min_vertex_cover(graph: dict) -> set:
    """simple docstring"""
    chosen_vertices = set()
    # edges = set of graph's edges
    edges = get_edges(graph)
    # While there are still elements in the edge set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove all edges adjacent to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices
def get_edges(graph ) -> set:
    """simple docstring"""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 51
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCamelCase__ = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
lowerCamelCase__ = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
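# Hedged usage sketch (commented out: `from_pretrained` downloads the
# checkpoint on first use):
# tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
# print(tokenizer("Hello world")["input_ids"])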
| 713
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["OwlViTFeatureExtractor"]
lowerCamelCase__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
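# Lazy-import sketch (illustrative): thanks to _LazyModule, an import such as
# `from transformers.models.owlvit import OwlViTModel` only triggers the heavy
# torch-backed modeling module on first attribute access.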
| 51
| 0
|
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal() -> None:
    """simple docstring"""
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes ,edges )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(result ) == sorted(expected )
| 714
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit = 1_000_000 ,n_limit = 10 ) -> int:
    """simple docstring"""
    count : defaultdict = defaultdict(int )
    for outer_width in range(3 ,(t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound ,outer_width - 1 ,2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
| 0
|
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowerCamelCase__ = sys.version_info >= (3, 10)
def list_field(default=None ,metadata=None ):
    """simple docstring"""
    return field(default_factory=lambda: default ,metadata=metadata )
@dataclass
class BasicExample:
    '''simple docstring'''
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = 42
SCREAMING_SNAKE_CASE__ :int = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class WithDefaultBoolExample:
    '''simple docstring'''
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum ):
    '''simple docstring'''
    titi = "titi"
    toto = "toto"
class MixedTypeEnum(Enum ):
    '''simple docstring'''
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = "toto"
def __SCREAMING_SNAKE_CASE ( self : int ) -> Any:
_UpperCamelCase : Union[str, Any] = BasicEnum(self.foo )
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = "toto"
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = MixedTypeEnum(self.foo )
@dataclass
class OptionalExample:
    '''simple docstring'''
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None , metadata={"help": "help message"} )
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[] )
    des: Optional[List[int]] = list_field(default=[] )
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = list_field(default=[] )
SCREAMING_SNAKE_CASE__ :Optional[Any] = list_field(default=[1, 2, 3] )
SCREAMING_SNAKE_CASE__ :Optional[Any] = list_field(default=["Hallo", "Bonjour", "Hello"] )
SCREAMING_SNAKE_CASE__ :int = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = field()
SCREAMING_SNAKE_CASE__ :str = field()
SCREAMING_SNAKE_CASE__ :List[Any] = field()
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
_UpperCamelCase : Dict = BasicEnum(self.required_enum )
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = 42
SCREAMING_SNAKE_CASE__ :Dict = field()
SCREAMING_SNAKE_CASE__ :List[Any] = None
SCREAMING_SNAKE_CASE__ :Dict = field(default="toto" , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE__ :str = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
SCREAMING_SNAKE_CASE__ :Optional[int] = True
SCREAMING_SNAKE_CASE__ :str = None
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = None
SCREAMING_SNAKE_CASE__ :str = field(default=__lowerCAmelCase , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE__ :Tuple = None
SCREAMING_SNAKE_CASE__ :List[str] = list_field(default=[] )
SCREAMING_SNAKE_CASE__ :List[Any] = list_field(default=[] )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def argparsersEqual( self , a : argparse.ArgumentParser , b : argparse.ArgumentParser ) -> None:
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x ).items() if k != "container"}
            yy = {k: v for k, v in vars(y ).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices" , None ) and yy.get("choices" , None ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice ) , yy["type"](expected_choice ) )
                del xx["type"], yy["type"]
            self.assertEqual(xx , yy )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
_UpperCamelCase : List[Any] = HfArgumentParser(_UpperCamelCase )
_UpperCamelCase : Tuple = argparse.ArgumentParser()
expected.add_argument("--foo" , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument("--bar" , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument("--baz" , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument("--flag" , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs="?" )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
_UpperCamelCase : Any = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
(_UpperCamelCase ) : Tuple = parser.parse_args_into_dataclasses(_UpperCamelCase , look_for_args_file=_UpperCamelCase )
self.assertFalse(example.flag )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
_UpperCamelCase : Any = HfArgumentParser(_UpperCamelCase )
_UpperCamelCase : List[Any] = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=_UpperCamelCase )
expected.add_argument("--baz" , default="toto" , type=_UpperCamelCase , help="help message" )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Tuple = argparse.ArgumentParser()
expected.add_argument("--foo" , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs="?" )
expected.add_argument("--baz" , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=_UpperCamelCase , dest="baz" )
expected.add_argument("--opt" , type=_UpperCamelCase , default=_UpperCamelCase )
_UpperCamelCase : List[str] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
_UpperCamelCase : Optional[Any] = HfArgumentParser(_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
_UpperCamelCase : str = parser.parse_args([] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
_UpperCamelCase : Any = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
_UpperCamelCase : Union[str, Any] = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
_UpperCamelCase : int = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
_UpperCamelCase : Optional[Any] = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Any:
_UpperCamelCase : int = HfArgumentParser(_UpperCamelCase )
_UpperCamelCase : Dict = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
_UpperCamelCase : Dict = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
_UpperCamelCase : Optional[int] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
_UpperCamelCase : Tuple = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
_UpperCamelCase : int = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
_UpperCamelCase : List[str] = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
_UpperCamelCase : Union[str, Any] = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = "toto"
_UpperCamelCase : List[Any] = HfArgumentParser(_UpperCamelCase )
_UpperCamelCase : Tuple = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
_UpperCamelCase : Tuple = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
_UpperCamelCase : Any = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
_UpperCamelCase : str = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
_UpperCamelCase : Optional[Any] = HfArgumentParser(_UpperCamelCase )
_UpperCamelCase : Any = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=_UpperCamelCase )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=_UpperCamelCase )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=_UpperCamelCase )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
_UpperCamelCase : int = parser.parse_args([] )
self.assertEqual(
_UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
_UpperCamelCase : Any = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(_UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
expected.add_argument("--foo" , default=_UpperCamelCase , type=_UpperCamelCase )
expected.add_argument("--bar" , default=_UpperCamelCase , type=_UpperCamelCase , help="help message" )
expected.add_argument("--baz" , default=_UpperCamelCase , type=_UpperCamelCase )
expected.add_argument("--ces" , nargs="+" , default=[] , type=_UpperCamelCase )
expected.add_argument("--des" , nargs="+" , default=[] , type=_UpperCamelCase )
_UpperCamelCase : str = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
_UpperCamelCase : Tuple = HfArgumentParser(_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
_UpperCamelCase : int = parser.parse_args([] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , bar=_UpperCamelCase , baz=_UpperCamelCase , ces=[] , des=[] ) )
_UpperCamelCase : List[Any] = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(_UpperCamelCase , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
_UpperCamelCase : str = HfArgumentParser(_UpperCamelCase )
_UpperCamelCase : str = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument("--required_str" , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=_UpperCamelCase , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
_UpperCamelCase : str = HfArgumentParser(_UpperCamelCase )
_UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
expected.add_argument("--foo" , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=_UpperCamelCase , )
expected.add_argument("--opt" , type=_UpperCamelCase , default=_UpperCamelCase )
expected.add_argument("--baz" , default="toto" , type=_UpperCamelCase , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
_UpperCamelCase : List[str] = HfArgumentParser(_UpperCamelCase )
_UpperCamelCase : List[Any] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
_UpperCamelCase : int = parser.parse_dict(_UpperCamelCase )[0]
_UpperCamelCase : List[Any] = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase : Dict = HfArgumentParser(_UpperCamelCase )
_UpperCamelCase : Tuple = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(_UpperCamelCase , parser.parse_dict , _UpperCamelCase , allow_extra_keys=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
_UpperCamelCase : str = HfArgumentParser(_UpperCamelCase )
_UpperCamelCase : List[str] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase : Union[str, Any] = os.path.join(_UpperCamelCase , "temp_json" )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase )
            _UpperCamelCase : List[str] = parser.parse_json_file(Path(temp_local_path + ".json" ) )[0]
_UpperCamelCase : Dict = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
_UpperCamelCase : Tuple = HfArgumentParser(_UpperCamelCase )
_UpperCamelCase : Union[str, Any] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase : int = os.path.join(_UpperCamelCase , "temp_yaml" )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(_UpperCamelCase , _UpperCamelCase )
_UpperCamelCase : Optional[int] = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
_UpperCamelCase : Any = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
_UpperCamelCase : Optional[int] = HfArgumentParser(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
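# Hedged usage sketch of the pattern exercised above (kept as a comment since
# this is a test module):
# parser = HfArgumentParser(BasicExample)
# (example,) = parser.parse_args_into_dataclasses(["--foo", "1", "--baz", "quux", "--bar", "0.5"])
# assert example.flag is False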
| 715
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("KEY")
lowerCamelCase__ = TypeVar("VAL")
@dataclass(frozen=True , slots=True )
class _Item(Generic[KEY, VAL] ):
    '''simple docstring'''
    key: KEY
    val: VAL
class _DeletedItem(_Item ):
    '''simple docstring'''
    def __init__( self ) -> None:
        super().__init__(None , None )
    def __bool__( self ) -> bool:
        return False
_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL] ):
    '''simple docstring'''
    def __init__( self , initial_block_size : int = 8 , capacity_factor : float = 0.75 ) -> None:
        self._initial_block_size = initial_block_size
        self._buckets : list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index( self , key : KEY ) -> int:
        return hash(key ) % len(self._buckets )
    def _get_next_ind( self , ind : int ) -> int:
        return (ind + 1) % len(self._buckets )
    def _try_set( self , ind : int , key : KEY , val : VAL ) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False
    def _is_full( self ) -> bool:
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )
    def _is_sparse( self ) -> bool:
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit
    def _resize( self , new_size : int ) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )
    def _size_up( self ) -> None:
        self._resize(len(self._buckets ) * 2 )
    def _size_down( self ) -> None:
        self._resize(len(self._buckets ) // 2 )
    def _iterate_buckets( self , key : KEY ) -> Iterator[int]:
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )
    def _add_item( self , key : KEY , val : VAL ) -> None:
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break
    def __setitem__( self , key : KEY , val : VAL ) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key , val )
    def __delitem__( self , key : KEY ) -> None:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__( self , key : KEY ) -> VAL:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )
    def __len__( self ) -> int:
        return self._len
    def __iter__( self ) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)
    def __repr__( self ) -> str:
        val_string = " ,".join(
            F'''{item.key}: {item.val}''' for item in self._buckets if item )
        return F'''HashMap({val_string})'''
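if __name__ == "__main__":
    # Usage sketch for the map above: a dict-like API backed by open addressing
    # with lazy deletion markers.
    hash_map = HashMap(initial_block_size=4 )
    hash_map["key_a"] = 1
    hash_map["key_b"] = 2
    del hash_map["key_a"]
    print(hash_map , len(hash_map ) )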
| 51
| 0
|
"""simple docstring"""
def largest_square_area_in_matrix_top_down_approach(rows ,cols ,mat ) -> int:
    """simple docstring"""

    def update_area_of_max_square(row ,col ) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row ,col + 1 )
        diagonal = update_area_of_max_square(row + 1 ,col + 1 )
        down = update_area_of_max_square(row + 1 ,col )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] ,sub_problem_sol )
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0 ,0 )
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp(rows ,cols ,mat ) -> int:
    """simple docstring"""

    def update_area_of_max_square_using_dp_array(
        row ,col ,dp_array ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row ,col + 1 ,dp_array )
        diagonal = update_area_of_max_square_using_dp_array(row + 1 ,col + 1 ,dp_array )
        down = update_area_of_max_square_using_dp_array(row + 1 ,col ,dp_array )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] ,sub_problem_sol )
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0 ,0 ,dp_array )
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows ,cols ,mat ) -> int:
    """simple docstring"""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1 )]
    largest_square_area = 0
    for row in range(rows - 1 ,-1 ,-1 ):
        for col in range(cols - 1 ,-1 ,-1 ):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right ,diagonal ,bottom )
                largest_square_area = max(dp_array[row][col] ,largest_square_area )
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows ,cols ,mat ) -> int:
    """simple docstring"""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1 ,-1 ,-1 ):
        for col in range(cols - 1 ,-1 ,-1 ):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right ,diagonal ,bottom )
                largest_square_area = max(current_row[col] ,largest_square_area )
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
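    # Cross-check sketch: the space-optimized variant above should agree on the
    # same 2x2 all-ones matrix (both print 2).
    print(largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]]))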
| 716
|
"""simple docstring"""
class PrefixSum:
    '''simple docstring'''
    def __init__( self , array : list[int] ) -> None:
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self , start : int , end : int ) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum( self , target_sum : int ) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
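    # Usage sketch: prefix sums of [1, 2, 3, 4] give get_sum(1, 3) = 2 + 3 + 4 = 9,
    # and contains_sum(9) finds that same window, so this prints "9 True".
    ps = PrefixSum([1, 2, 3, 4])
    print(ps.get_sum(1 , 3 ) , ps.contains_sum(9 ) )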
| 51
| 0
|
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num ,den ) -> bool:
    """simple docstring"""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len ) -> list[str]:
    """simple docstring"""
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len )
    for num in range(den ,last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num ,den ):
                    solutions.append(F'''{num}/{den}''' )
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n = 2 ) -> int:
    """simple docstring"""
    result = 1.0
    for fraction in fraction_list(n ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
print(solution())
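    # Cross-check sketch: the four two-digit digit-cancelling fractions are
    # 16/64, 19/95, 26/65 and 49/98, whose product reduces to 1/100, so the
    # line above prints 100.
    print(fraction_list(2 ) )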
| 717
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token ,num_runs=7 ):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
    result = requests.get(url ,headers=headers ).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token ):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token )
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names ,output_dir ,token ):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        # (sic: the keyword spelling matches get_artifacts_links' signature)
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id ,token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name ,artifact_url=artifact_url ,output_dir=output_dir ,token=token )
def get_last_daily_ci_reports(artifact_names ,output_dir ,token ):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names ,output_dir ,token )
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir ,F'''{artifact_name}.zip''' )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8" )
    return results
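# Hedged usage sketch (requires a GitHub token; the artifact name below is
# illustrative, not taken from this file):
# reports = get_last_daily_ci_reports(["ci_results"], "ci_artifacts", token=os.environ["GH_TOKEN"])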
| 51
| 0
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
lowerCamelCase__ = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig ):
    model_type = "vision-encoder-decoder"
    is_composition = True
    def __init__( self , **kwargs ) -> None:
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                F'''A configuration of type {self.model_type} cannot be instantiated because '''
                F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
        encoder_config = kwargs.pop("encoder" )
        encoder_model_type = encoder_config.pop("model_type" )
        decoder_config = kwargs.pop("decoder" )
        decoder_model_type = decoder_config.pop("model_type" )
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , **kwargs ) -> "PretrainedConfig":
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self ) -> dict:
        output = copy.deepcopy(self.__dict__ )
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1e-4
    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}} )
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs
    def generate_dummy_inputs( self , tokenizer : "PreTrainedTokenizerBase" , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids" )
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask" )
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape )
        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig ):
    @property
    def inputs( self ) -> None:
        pass
    def get_encoder_config( self , encoder_config : PretrainedConfig ) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )
    def get_decoder_config( self , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , feature : str = "default" ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature )
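# Hedged usage sketch (sub-configs illustrative): composing a ViT encoder with
# a GPT-2 decoder configuration.
# from transformers import ViTConfig, GPT2Config
# config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())
# print(config.decoder.is_decoder)  # True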
| 718
|
"""simple docstring"""
import math
class SelfOrganizingMap:
    '''simple docstring'''
    def get_winner( self , weights : list[list[float]] , sample : list[int] ) -> int:
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample ) ):
            d0 += math.pow((sample[i] - weights[0][i]) , 2 )
            d1 += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if d0 > d1 else 1
    def update( self , weights : list[list[int | float]] , sample : list[int] , j : int , alpha : float ) -> list[list[int | float]]:
        for i in range(len(weights ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    """simple docstring"""
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights ,sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights ,sample ,winner ,alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights ,sample )
    # results
    print(F'''Clusters that the test sample belongs to : {winner}''' )
    print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
| 51
| 0
|
"""simple docstring"""
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img ,pt1 ,pt2 ,rows ,cols ) -> np.ndarray:
    """simple docstring"""
    rotation_matrix = cv2.getAffineTransform(pt1 ,pt2 )
    return cv2.warpAffine(img ,rotation_matrix ,(rows, cols) )
if __name__ == "__main__":
if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image (pairing of source/destination
    # points below is a reasonable reconstruction, not uniquely recoverable)
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 719
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase__ = "src/transformers"
lowerCamelCase__ = "docs/source/en"
lowerCamelCase__ = "."
def _find_text_in_file(filename ,start_prompt ,end_prompt ):
    """simple docstring"""
    with open(filename ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
lowerCamelCase__ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase__ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
lowerCamelCase__ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier ):
    """simple docstring"""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" ,identifier )
    return [m.group(0 ) for m in matches]
def _center_text(text ,width ):
    """simple docstring"""
    text_length = 2 if text == "✅" or text == "❌" else len(text )
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules() -> str:
    """simple docstring"""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config" ,"" ) for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool )
    fast_tokenizers = collections.defaultdict(bool )
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if attr_name.endswith("Tokenizer" ):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast" ):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name )[:-1] )
    # Let's build that table!
    model_names = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c ) + 2 for c in columns]
    widths[0] = max([len(name ) for name in model_names] ) + 2
    # Build the table per se
    table = "|" + "|".join([_center_text(c ,w ) for c, w in zip(columns ,widths )] ) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l ,w ) for l, w in zip(line ,widths )] ) + "|\n"
    return table
def check_model_table(overwrite=False ):
    """simple docstring"""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS ,"index.md" ) ,start_prompt="<!--This table is updated automatically from the auto modules" ,end_prompt="<!-- End table-->" ,)
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS ,"index.md" ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
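# Invocation sketch: run `python utils/check_table.py` to verify the generated
# table, or `python utils/check_table.py --fix_and_overwrite` to rewrite
# docs/source/en/index.md in place.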
| 51
| 0
|
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **kwargs ) -> dict:
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ) -> None:
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_variance_type( self ) -> None:
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self ) -> None:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_clip_sample_range( self ) -> None:
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )
    def test_prediction_type( self ) -> None:
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ) -> None:
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
    def test_variance_fixed_small_log( self ) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log" )
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1e-5
    def test_variance_learned_range( self ) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range" )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487 , predicted_variance=predicted_variance ) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999 , predicted_variance=predicted_variance ) - -0.0010011 < 1e-5
    def test_full_loop( self ) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 252.2682495 ) < 1e-2
        assert abs(result_mean.item() - 0.3284743 ) < 1e-3
    def test_full_loop_skip_timesteps( self ) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.2044983 ) < 1e-2
        assert abs(result_mean.item() - 0.3362038 ) < 1e-3
    # The two overrides below intentionally skip common-test behavior; their
    # names are a best-effort reconstruction of the originals.
    def test_trained_betas( self ) -> None:
        pass
    def test_add_noise_device( self ) -> None:
        pass
| 720
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, float]:
"""simple docstring"""
_UpperCamelCase : str = len([g for position, g in enumerate(lowercase_ ) if g == main_target[position]] )
return (item, float(lowercase_ ))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, str]:
"""simple docstring"""
_UpperCamelCase : Tuple = random.randint(0 ,len(lowercase_ ) - 1 )
_UpperCamelCase : Dict = parent_a[:random_slice] + parent_a[random_slice:]
_UpperCamelCase : Tuple = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : int = list(lowercase_ )
if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
_UpperCamelCase : int = random.choice(lowercase_ )
return "".join(lowercase_ )
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Crossover `parent_1` with random members of the scored population and mutate the children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new strings to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
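

# Illustrative sketch (not part of the original algorithm): a single
# crossover-plus-mutation round on two hand-picked parents. Uncomment to try it.
#
# parent_a, parent_b = "banana", "bandit"
# child_a, child_b = crossover(parent_a, parent_b)
# print(mutate(child_a, list("abdint")))  # e.g. "bandit" with one gene flipped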
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings until one matches `target` exactly."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones in the `genes` list.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
# Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is the selection step.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also compute small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 51
| 0
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between two prompts, plus its start/end indices and all file lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
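

# Illustrative check (assumed file layout, not part of the original script): for a
# file whose lines are ["<!--This table is updated automatically...", "body", and
# "<!-- End table-->"], _find_text_in_file returns ("body\n", 1, 2, lines).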
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
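

# Illustrative check (not part of the original script):
# camel_case_split("TFBertModel") returns ["TF", "Bert", "Model"].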
def _center_text(text, width):
    """Center `text` in a cell of size `width`, counting ✅/❌ as two characters wide."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
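

# Illustrative check (not part of the original script):
# _center_text("ok", 8) returns "   ok   " (three spaces on either side).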
def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's loop through all transformers objects (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in index.md is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 721
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """Map a fairseq MusicGen weight name to its Hugging Face equivalent."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
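

# Illustrative check (not part of the original script):
# rename_keys("transformer.layers.0.linear1.weight")
# returns "model.decoder.layers.0.fc1.weight".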
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename the fairseq state dict to HF names and split off the enc-dec projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
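

# Shape note (a sketch of the assumed layout, not asserted by the original script):
# a fused `in_proj_weight` of shape (3 * hidden_size, hidden_size) splits into three
# (hidden_size, hidden_size) matrices for q/k/v, e.g.:
#
# import torch
# w = torch.randn(3 * 1024, 1024)
# q, k, v = w[:1024, :], w[1024 : 2 * 1024, :], w[-1024:, :]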
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
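

# Illustrative check (not part of the original script): for the "small" checkpoint
# this returns hidden_size=1024, num_hidden_layers=24, num_attention_heads=16 and
# ffn_dim=4096 (always 4 * hidden_size).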
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """Convert a fairseq MusicGen checkpoint into the Hugging Face composite model and processor."""
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 51
| 0
|
"""simple docstring"""
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters of almost-equilateral Heronian triangles up to `max_perimeter`."""
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
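

# Sanity-check sketch (not part of the original solution): the first perimeters the
# recurrence produces are 16, 50 and 196, i.e. the triangles (5, 5, 6), (17, 17, 16)
# and (65, 65, 66), so solution(200) returns 16 + 50 + 196 = 262.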
if __name__ == "__main__":
print(f"""{solution() = }""")
| 700
|
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
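

# Hardening sketch (assumptions, not in the original script): check the HTTP status
# and handle pages without an og:image tag before writing the file.
#
# response = requests.get(url, timeout=10)
# response.raise_for_status()
# tag = BeautifulSoup(response.content, "html.parser").find("meta", {"property": "og:image"})
# if tag is None:
#     raise SystemExit("No og:image meta tag found on the page.")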
| 51
| 0
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)
    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")

                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
@tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 701
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    """Get all test classes in a model test file that have a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` attributes in a model test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester
def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to model tester classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to test classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to model tester classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    """Make the information succinct and easy to read."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
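

# Illustrative usage (hypothetical test-file path, not part of the original module):
#
# mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
# print(to_json(mapping))  # e.g. {"BertModel": ["BertModelTester"], ...}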
| 51
| 0
|
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    """Return True if one more relaxation round would still improve a distance."""
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Return the shortest distances from `src` to every vertex, relaxing every edge `vertex_count - 1` times."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
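

# Illustrative check (not part of the original script): for the 2-edge graph
#   g = [{"src": 0, "dst": 1, "weight": 2}, {"src": 1, "dst": 2, "weight": 3}]
# bellman_ford(g, vertex_count=3, edge_count=2, src=0) returns [0.0, 2.0, 5.0].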
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 702
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 51
| 0
|
"""simple docstring"""
import copy
import os
from typing import List, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        """Instantiate an AlignConfig from an ALIGN text and vision configuration."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
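

# Illustrative usage (minimal sketch, not part of the original module):
#
# text_config = AlignTextConfig()
# vision_config = AlignVisionConfig()
# config = AlignConfig.from_text_vision_configs(text_config, vision_config)
# assert config.to_dict()["model_type"] == "align"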
| 703
|
"""simple docstring"""
lowerCamelCase__ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 51
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Sum every node value of a binary tree, exposed through the iterator protocol."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
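

# Illustrative usage (not part of the original module):
#
# tree = Node(10)
# tree.left, tree.right = Node(5), Node(-3)
# assert list(BinaryTreeNodeSum(tree)) == [12]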
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

            self.assertEqual(retriever.block_records[0], b"This is the first record")
| 51
| 0
|
"""simple docstring"""
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options run inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
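

# Illustrative invocation (hypothetical TPU name and zone, not from the original module):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "python train.py" --install_accelerate --debug
#
# With --debug set, tpu_command_launcher only prints the assembled gcloud command.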
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
| 705
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    '''simple docstring'''
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
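        # Worked example (with these defaults): seq_length=7, attention_window=4
        # -> encoder_seq_length = 7 + (4 - 7 % 4) % 4 = 8, i.e. inputs are padded up to a
        # multiple of the attention window before the local-attention shape checks run.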
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1, )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None) -> Dict:
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
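# Note on the masks built above: the first decoder position is kept unmasked unconditionally
# because it carries the decoder start token (which here equals the pad token), so the
# padding check is only applied from position 1 onwards.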
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"], )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], )
        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices], )
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass
    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implement
        pass
def _long_tensor(tok_lst):
    """simple docstring"""
    return tf.constant(tok_lst, dtype=tf.int32)
TOLERANCE = 1E-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]], )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)
    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]], )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic):
    """simple docstring"""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
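# A* recap: each queue entry stores [f, g, x, y] with f = g + heuristic[x][y]; sorting on f
# and popping the smallest entry expands the most promising frontier cell first, while the
# action grid remembers which move reached each cell so the path can be rebuilt backwards.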
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
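    # In this toy setup the shape and pronunciation vocabularies (built in setUp) mirror the
    # token vocabulary entry-for-entry, which is why all three id sequences above coincide;
    # real RoCBert vocabularies map tokens to distinct shape/pronunciation ids.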
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True)
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")
            sequences = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
            expected_encoding = {
"input_ids": [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
            self.assertDictEqual(encoding.data, expected_encoding)
            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "yolos"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    @property
    def default_onnx_opset(self) -> int:
        return 12
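# The ONNX config above advertises a single "pixel_values" input in NCHW layout, validates
# exported graphs to within atol=1e-4, and targets ONNX opset 12 by default.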
"""simple docstring"""
def valid_coloring(neighbours, colored_vertices, color) -> bool:
    """simple docstring"""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours))
def util_color(graph, max_colors, colored_vertices, index) -> bool:
    """simple docstring"""
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph, max_colors) -> list[int]:
    """simple docstring"""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
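if __name__ == "__main__":
    # Minimal usage sketch (assumed adjacency-matrix input): a triangle needs three colors.
    triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
    print(color(triangle, 3))  # [0, 1, 2]
    print(color(triangle, 2))  # [] -> no valid 2-coloring exists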
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext, key) -> str | None:
    """simple docstring"""
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext) -> list[str]:
    """simple docstring"""
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles, common_word) -> list[str]:
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    """simple docstring"""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
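# XOR is its own inverse, which is why re-applying the key decrypts: for example
# ord("H") ^ ord("k") == 35, and 35 ^ ord("k") == ord("H") again, so any 3-letter
# lowercase key can simply be cycled over the ciphertext and checked for printable output.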
if __name__ == "__main__":
print(f"""{solution() = }""")
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                " pretokenized inputs." )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                " pretokenized inputs." )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
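    # e.g. a two-turn conversation becomes [user ids..., eos, bot ids..., eos]; if the running
    # length exceeds model_max_length, only the most recent ids are kept (left truncation).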
"""simple docstring"""
def print_max_activities(start, finish) -> None:
    """simple docstring"""
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
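    # Expected output for this data: activities 0, 1, 3 and 4 are selected, i.e. the line
    # "The following activities are selected:" is followed by "0,1,3,4," (greedy by finish time).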
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data) -> tuple:
    """simple docstring"""
    return (data["data"], data["target"])
def xgboost(features, target) -> XGBClassifier:
    """simple docstring"""
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main() -> None:
    """simple docstring"""
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25)
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",)
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
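# Hypothetical quick check: xgboost(x_train, y_train).predict(x_test) yields class ids
# (0..2 for iris) that the normalized confusion matrix above compares against y_test.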
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    '''simple docstring'''
    sample: torch.FloatTensor
class Encoder(nn.Module):
    '''simple docstring'''
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", double_z=True):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = torch.nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1, )
        self.mid_block = None
        self.down_blocks = nn.ModuleList([])
        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None, )
            self.down_blocks.append(down_block)
        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None, )
        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
        self.gradient_checkpointing = False
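        # When double_z is True the encoder emits 2 * out_channels feature maps: the first half
        # is read as the posterior mean and the second half as log-variance by
        # DiagonalGaussianDistribution below, which chunks the tensor along dim=1.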
    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)
                return custom_forward
            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False)
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False)
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)
        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class Decoder(nn.Module):
    '''simple docstring'''
    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", norm_type="group"):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1, )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        temb_channels = in_channels if norm_type == "spatial" else None
        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default" if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels, )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type, )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
        self.gradient_checkpointing = False
    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)
        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)
                return custom_forward
            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False)
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)
        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class VectorQuantizer(nn.Module):
    '''simple docstring'''
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
                f'''Using {self.unknown_index} for unknown indices.''' )
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape
    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)
    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)
    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None
        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
        # preserve gradients
        z_q = z + (z_q - z).detach()
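        # This is the straight-through estimator: the forward pass uses the quantized z_q,
        # while the backward pass copies gradients to z as if quantization were the identity.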
        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()
        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again
        # get quantized latent vectors
        z_q = self.embedding(indices)
        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()
        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters: torch.Tensor, deterministic: bool = False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has the same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None) -> torch.Tensor:
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample: torch.Tensor, dims=[1, 2, 3]) -> torch.Tensor:
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self) -> torch.Tensor:
        return self.mean
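
A standalone sketch (added for illustration, not part of the original file) of the two ideas the `forward` method above combines: nearest-codebook lookup via `torch.cdist` plus the straight-through gradient trick. The codebook size (16) and latent dimension (4) are arbitrary.

import torch

codebook = torch.nn.Embedding(16, 4)                        # 16 codes of dimension 4
z = torch.randn(128, 4, requires_grad=True)                 # flattened latents
indices = torch.argmin(torch.cdist(z, codebook.weight), dim=1)
z_q = codebook(indices)
z_q = z + (z_q - z).detach()                                # straight-through: gradients flow to z
z_q.sum().backward()
assert z.grad is not None                                   # despite the non-differentiable argmin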
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
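
A quick round-trip sketch (added for illustration, assuming the reconstructed class above): dashed YAML keys listed in `_FIELDS_WITH_DASHES` are mapped to underscore field names on the way in and back to dashes on the way out.

metadata = DatasetMetadata.from_yaml_string("license: mit\ntrain-eval-index: []\n")
assert metadata["train_eval_index"] == []   # dashed key became a field name
print(metadata.to_yaml_string())            # dumps the dashed form back out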
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
"""simple docstring"""
def solution() -> str:
    """Find the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
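
A sanity-check variant (added here, not in the original): the same last ten digits can be computed without ever building the huge integers, by working modulo 10^10 with Python's three-argument `pow`.

def solution_mod() -> str:
    mod = 10**10
    return str(sum(pow(i, i, mod) for i in range(1, 1_001)) % mod).zfill(10)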
"""simple docstring"""
def matching_min_vertex_cover(graph: dict) -> set:
    """APX algorithm for minimum vertex cover based on a maximal matching."""
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node), add both its endpoints to chosen_vertices, and then
    # remove all edges adjacent to from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of (from_node, to_node) pairs representing all edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
"""simple docstring"""
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["OwlViTFeatureExtractor"]
lowerCamelCase__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: is a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Project Euler 174: count the tile counts t <= t_limit for which the number
    of distinct hollow square laminae is between 1 and n_limit.
    """
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
def stooge_sort(arr):
    """Sort `arr` in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, (h - t))
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, (h))
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, (h - t))
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
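
A non-interactive check (added for illustration) of the restored names above:

assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]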
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing and tombstone deletion."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to add the value at this bucket; False if occupied by another key."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
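
A quick exercise of the map (added for illustration), including a delete that leaves a `_deleted` tombstone behind:

hm = HashMap(initial_block_size=4)
for k in range(10):
    hm[k] = k * k              # triggers several _size_up() resizes along the way
assert hm[7] == 49 and len(hm) == 10
del hm[7]                      # slot becomes the _deleted sentinel
assert 7 not in hm and len(hm) == 9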
"""simple docstring"""
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort a list of integers in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))
if __name__ == "__main__":
main()
"""simple docstring"""
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start:end + 1] in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
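
Example use (added for illustration): range sums in O(1) after O(n) preprocessing, and the hash-set trick in `contains_sum` for subarray-sum queries.

ps = PrefixSum([1, 2, 3, 4])
assert ps.get_sum(0, 3) == 10
assert ps.get_sum(1, 2) == 5
assert ps.contains_sum(7)        # 3 + 4
assert not ps.contains_sum(11)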
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1_000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )

    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1_000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
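
A reader-side sketch (added for illustration; the feature names mirror `get_serialized_examples` above, and the filename pattern and `tf-tpu/train` default come from `main` and `parse_args`):

import tensorflow as tf

feature_description = {
    "input_ids": tf.io.VarLenFeature(tf.int64),
    "attention_mask": tf.io.VarLenFeature(tf.int64),
}

def decode_fn(record_bytes):
    parsed = tf.io.parse_single_example(record_bytes, feature_description)
    return {k: tf.sparse.to_dense(v) for k, v in parsed.items()}

ds = tf.data.TFRecordDataset(tf.io.gfile.glob("tf-tpu/train/dataset-*.tfrecord")).map(decode_fn)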
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the run id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the artifacts' content of the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="bert", choices=["bert"])
parser.add_argument("--model_name", default="bert-base-uncased", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()
if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
else:
raise ValueError("args.model_type should be \"bert\".")
    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
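
A loading sketch (added for illustration; it assumes the extracted keys match a default 6-layer DistilBERT-style student, which this script alone does not guarantee):

    import torch
    from transformers import DistilBertConfig, DistilBertForMaskedLM

    student = DistilBertForMaskedLM(DistilBertConfig())  # default config has 6 layers
    sd = torch.load(args.dump_checkpoint, map_location="cpu")
    missing, unexpected = student.load_state_dict(sd, strict=False)
    print(f"missing: {missing}\nunexpected: {unexpected}")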
"""simple docstring"""
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Update the winning vector."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()