"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Tuple , __a : str=7 , __a : int=3 , __a : str=18 , __a : Any=30 , __a : Optional[int]=400 , __a : Tuple=True , __a : Optional[Any]=32 , __a : Union[str, Any]=True , ) -> Optional[Any]:
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : Any = batch_size
_UpperCamelCase : int = num_channels
_UpperCamelCase : List[str] = image_size
_UpperCamelCase : int = min_resolution
_UpperCamelCase : Union[str, Any] = max_resolution
_UpperCamelCase : Tuple = do_resize
_UpperCamelCase : List[Any] = size_divisor
_UpperCamelCase : Dict = do_rescale
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = GLPNImageProcessor if is_vision_available() else None
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
_UpperCamelCase : Optional[int] = GLPNImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , "do_resize" ) )
self.assertTrue(hasattr(__a , "size_divisor" ) )
self.assertTrue(hasattr(__a , "resample" ) )
self.assertTrue(hasattr(__a , "do_rescale" ) )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self : str ) -> int:
# Initialize image_processing
_UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
_UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
# Initialize image_processing
_UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
_UpperCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
# Initialize image_processing
_UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
_UpperCamelCase : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
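
# The three test_call_* checks above all assert GLPN's core preprocessing invariant:
# both spatial dimensions of the output are rounded down to a multiple of
# `size_divisor`. A minimal sketch of that rounding rule (an illustrative helper,
# not the processor's actual implementation):
def _floor_to_multiple(dim: int, size_divisor: int = 32) -> int:
    # e.g. _floor_to_multiple(400) == 384, so a 400-pixel edge satisfies the asserts above
    return (dim // size_divisor) * size_divisor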
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : int = prime_factors(lowercase_ )
if is_square_free(lowercase_ ):
return -1 if len(lowercase_ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
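
# A few expected values of mobius(), assuming the imported maths.prime_factors and
# maths.is_square_free helpers behave as their names suggest:
#     mobius(4)  -> 0    (4 = 2**2 has a squared prime factor)
#     mobius(6)  -> 1    (6 = 2 * 3: square-free, even number of prime factors)
#     mobius(7)  -> -1   (7 is prime: square-free, odd number of prime factors)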
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase__ = logging.getLogger()
def lowercase__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("-f" )
_UpperCamelCase : Dict = parser.parse_args()
return args.f
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> None:
_UpperCamelCase : Tuple = logging.StreamHandler(sys.stdout )
logger.addHandler(__a )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[Any] ) -> Tuple:
_UpperCamelCase : Tuple = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(__a , "argv" , __a ):
_UpperCamelCase : str = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(__a , 0.6_66 )
@slow
@require_torch_non_multi_gpu
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
_UpperCamelCase : int = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(__a )
_UpperCamelCase : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(__a )
_UpperCamelCase : List[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(__a )
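
# run_and_check drives the argparse-based script in-process rather than in a
# subprocess: patching sys.argv makes the script's own parser see the arguments as
# if they had come from a shell. A minimal sketch of the same pattern, with a
# hypothetical module `my_script` whose main() reads sys.argv:
#
#     with patch.object(sys, "argv", ["my_script.py", "--do_eval"]):
#         result = my_script.main()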
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = GPTaTokenizer
SCREAMING_SNAKE_CASE__ :Tuple = GPTaTokenizerFast
SCREAMING_SNAKE_CASE__ :Dict = True
SCREAMING_SNAKE_CASE__ :int = {"add_prefix_space": True}
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase : List[str] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_UpperCamelCase : Tuple = dict(zip(__a , range(len(__a ) ) ) )
_UpperCamelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCamelCase : str = {"unk_token": "<unk>"}
_UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__a ) )
def __SCREAMING_SNAKE_CASE ( self : Any , **__a : Optional[int] ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Union[str, Any] ) -> int:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any ) -> Tuple:
_UpperCamelCase : List[Any] = "lower newer"
_UpperCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCamelCase : Optional[Any] = "lower newer"
_UpperCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_UpperCamelCase : Any = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
_UpperCamelCase : str = tokens + [tokenizer.unk_token]
_UpperCamelCase : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
if not self.test_rust_tokenizer:
return
_UpperCamelCase : Any = self.get_tokenizer()
_UpperCamelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = "lower newer"
# Testing tokenization
_UpperCamelCase : str = tokenizer.tokenize(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
_UpperCamelCase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
_UpperCamelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : List[Any] = tokenizer.encode(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
_UpperCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_UpperCamelCase : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : int , *__a : int , **__a : List[Any] ) -> Union[str, Any]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int=15 ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
_UpperCamelCase : Optional[int] = "This is a simple input"
_UpperCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Dict = ("This is a simple input", "This is a pair")
_UpperCamelCase : Any = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
_UpperCamelCase : Union[str, Any] = "This is a simple input"
_UpperCamelCase : Optional[Any] = ["This is a simple input looooooooong", "This is a simple input"]
_UpperCamelCase : str = ("This is a simple input", "This is a pair")
_UpperCamelCase : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_UpperCamelCase : Union[str, Any] = tokenizer.pad_token_id
_UpperCamelCase : str = tokenizer(__a , padding="max_length" , max_length=30 , return_tensors="np" )
_UpperCamelCase : Tuple = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
_UpperCamelCase : str = tokenizer(*__a , padding="max_length" , max_length=60 , return_tensors="np" )
_UpperCamelCase : Optional[int] = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
_UpperCamelCase : Any = "$$$"
_UpperCamelCase : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
_UpperCamelCase : int = "This is a simple input"
_UpperCamelCase : Tuple = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Union[str, Any] = tokenizer.bos_token_id
_UpperCamelCase : str = tokenizer(__a )
_UpperCamelCase : Optional[Any] = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_UpperCamelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_UpperCamelCase : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
pass
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
# TODO: change to self.get_tokenizers() when the fast version is implemented
_UpperCamelCase : Optional[Any] = [self.get_tokenizer(do_lower_case=__a , add_bos_token=__a )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : Tuple = "Encode this."
_UpperCamelCase : List[str] = "This one too please."
_UpperCamelCase : Optional[int] = tokenizer.encode(__a , add_special_tokens=__a )
encoded_sequence += tokenizer.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer.encode_plus(
__a , __a , add_special_tokens=__a , return_special_tokens_mask=__a , )
_UpperCamelCase : str = encoded_sequence_dict["input_ids"]
_UpperCamelCase : Optional[int] = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(__a ) , len(__a ) )
_UpperCamelCase : Union[str, Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__a )
]
_UpperCamelCase : Union[str, Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(__a , __a )
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Any = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("test_opt" )
_UpperCamelCase : str = AutoTokenizer.from_pretrained("./test_opt" )
_UpperCamelCase : Optional[Any] = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
_UpperCamelCase : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Union[str, Any] = tokenizer.encode(
__a , )
# Same as above
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[str] = "bos"
_UpperCamelCase : Tuple = tokenizer.get_vocab()["bos"]
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : List[Any] = tokenizer.encode(
__a , )
# We changed the bos token
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("./tok" )
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
_UpperCamelCase : Tuple = tokenizer.encode(
__a , )
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
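
# Why test_full_tokenizer expects ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
# for "lower newer": "\u0120" (Ġ) marks a leading space in byte-level BPE, and the
# toy merges file above is applied greedily, e.g. for " low":
#
#     "\u0120 l"   -> "\u0120l"     (merge rank 0)
#     "\u0120l o"  -> "\u0120lo"    (merge rank 1)
#     "\u0120lo w" -> "\u0120low"   (merge rank 2)
#
# "er" comes from the "e r" merge, while "n", "e", "w" in " newer" stay single
# symbols because no merge in the toy list covers them.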
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). " , _UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = RobertaConfig
SCREAMING_SNAKE_CASE__ :Any = "roberta"
def __init__( self : int , __a : List[Any] ) -> List[str]:
super().__init__(__a )
_UpperCamelCase : Optional[Any] = RobertaEmbeddings(__a )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " , _UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = RobertaConfig
SCREAMING_SNAKE_CASE__ :Union[str, Any] = "roberta"
def __init__( self : Tuple , __a : Optional[int] ) -> Optional[int]:
super().__init__(__a )
_UpperCamelCase : Tuple = config.num_labels
_UpperCamelCase : Dict = config.num_hidden_layers
_UpperCamelCase : Optional[Any] = DeeRobertaModel(__a )
_UpperCamelCase : int = nn.Dropout(config.hidden_dropout_prob )
_UpperCamelCase : Tuple = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(__a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int=None , __a : List[str]=None , __a : Optional[Any]=None , __a : int=None , __a : Dict=None , __a : Optional[Any]=None , __a : int=None , __a : Dict=-1 , __a : Union[str, Any]=False , ) -> str:
_UpperCamelCase : Tuple = self.num_layers
try:
_UpperCamelCase : Union[str, Any] = self.roberta(
__a , attention_mask=__a , token_type_ids=__a , position_ids=__a , head_mask=__a , inputs_embeds=__a , )
_UpperCamelCase : int = outputs[1]
_UpperCamelCase : Any = self.dropout(__a )
_UpperCamelCase : Optional[int] = self.classifier(__a )
_UpperCamelCase : str = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_UpperCamelCase : str = e.message
_UpperCamelCase : Tuple = e.exit_layer
_UpperCamelCase : Optional[int] = outputs[0]
if not self.training:
_UpperCamelCase : List[Any] = entropy(__a )
_UpperCamelCase : List[Any] = []
_UpperCamelCase : Union[str, Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_UpperCamelCase : List[Any] = MSELoss()
_UpperCamelCase : List[Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_UpperCamelCase : Tuple = CrossEntropyLoss()
_UpperCamelCase : List[str] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_UpperCamelCase : Any = []
for highway_exit in outputs[-1]:
_UpperCamelCase : Union[str, Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(__a )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_UpperCamelCase : Optional[Any] = MSELoss()
_UpperCamelCase : Tuple = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_UpperCamelCase : Optional[Any] = CrossEntropyLoss()
_UpperCamelCase : Optional[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__a )
if train_highway:
_UpperCamelCase : Optional[int] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_UpperCamelCase : Union[str, Any] = (loss,) + outputs
if not self.training:
_UpperCamelCase : int = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_UpperCamelCase : int = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
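
# The off-ramps exit early when an intermediate layer's prediction is already
# confident, measured by the entropy of its logits (the `entropy` helper imported
# above, thresholded by --early_exit_entropy at inference time). A minimal sketch of
# that criterion, assuming `entropy` is the Shannon entropy of the softmax
# distribution over labels:
def _should_exit_early(logits, threshold):
    # Low entropy = confident prediction = safe to take the off-ramp.
    return entropy(logits) < threshold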
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCamelCase__ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __SCREAMING_SNAKE_CASE ( unittest.TestCase , _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = load_tool("text-question-answering" )
self.tool.setup()
_UpperCamelCase : Union[str, Any] = load_tool("text-question-answering" , remote=__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Dict = self.tool(__a , "What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.remote_tool(__a , "What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Dict = self.tool(text=__a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : List[Any] = self.remote_tool(text=__a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> list[int]:
"""simple docstring"""
if length <= 0 or not isinstance(lowercase_ ,lowercase_ ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(lowercase_ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
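
# Expected output of the two calls above, from h_n = n * (2n - 1) with n = 0, 1, 2, ...:
#     [0, 1, 6, 15, 28]
#     [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]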
"""simple docstring"""
lowerCamelCase__ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Tuple = [False] * len(lowercase_ )
_UpperCamelCase : Dict = [s]
_UpperCamelCase : List[str] = True
while queue:
_UpperCamelCase : Union[str, Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(lowercase_ )
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : List[str] = u
return visited[t]
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : int = [-1] * (len(lowercase_ ))
_UpperCamelCase : Optional[int] = 0
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : str = [i[:] for i in graph] # Record original cut, copy.
while bfs(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ):
_UpperCamelCase : int = float("Inf" )
_UpperCamelCase : Optional[Any] = sink
while s != source:
# Find the minimum value in select path
_UpperCamelCase : List[Any] = min(lowercase_ ,graph[parent[s]][s] )
_UpperCamelCase : Union[str, Any] = parent[s]
max_flow += path_flow
_UpperCamelCase : Union[str, Any] = sink
while v != source:
_UpperCamelCase : Optional[Any] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_UpperCamelCase : Dict = parent[v]
for i in range(len(lowercase_ ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
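
# mincut() runs BFS-based Ford-Fulkerson (Edmonds-Karp) until no augmenting path is
# left, then reports every edge whose residual capacity was driven to zero as the
# cut. Background: by max-flow/min-cut duality, the capacities of a true minimum cut
# sum to the maximum flow. A minimal check sketch (pass a copy, since mincut mutates
# its graph argument):
#
#     cut = mincut([row[:] for row in test_graph], source=0, sink=5)
#     cut_capacity = sum(test_graph[i][j] for i, j in cut)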
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Optional[int] = tf.convert_to_tensor(lowercase_ )
_UpperCamelCase : List[str] = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) ,x.dtype ) ))
return x * cdf
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Tuple = tf.convert_to_tensor(lowercase_ )
_UpperCamelCase : Optional[Any] = tf.cast(math.pi ,x.dtype )
_UpperCamelCase : Union[str, Any] = tf.cast(0.04_4715 ,x.dtype )
_UpperCamelCase : Optional[Any] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(lowercase_ ,3 )) ))
return x * cdf
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Dict = tf.convert_to_tensor(lowercase_ )
return x * tf.tanh(tf.math.softplus(lowercase_ ) )
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = tf.convert_to_tensor(lowercase_ )
_UpperCamelCase : int = tf.cast(0.04_4715 ,x.dtype )
_UpperCamelCase : Optional[Any] = tf.cast(0.79_7884_5608 ,x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = tf.convert_to_tensor(lowercase_ )
_UpperCamelCase : List[Any] = tf.cast(1.702 ,x.dtype )
return x * tf.math.sigmoid(coeff * x )
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
return tf.clip_by_value(_gelu(lowercase_ ) ,-10 ,10 )
def lowercase__ ( lowercase_ ,lowercase_=-1 ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : Optional[int] = tf.split(lowercase_ ,2 ,axis=lowercase_ )
return a * tf.math.sigmoid(lowercase_ )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
return tf.keras.activations.gelu(lowercase_ ,approximate=lowercase_ )
lowerCamelCase__ = tf.keras.activations.gelu
lowerCamelCase__ = approximate_gelu_wrap
else:
lowerCamelCase__ = _gelu
lowerCamelCase__ = _gelu_new
lowerCamelCase__ = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
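
# A minimal usage sketch of the lookup above:
#
#     act = get_tf_activation("gelu_fast")
#     y = act(tf.constant([-1.0, 0.0, 1.0]))
#
# "gelu_new" and "gelu_fast" are tanh-based approximations of the exact erf-based
# "gelu"; the variants agree closely (maximum absolute deviation on the order of
# 1e-3), which is why checkpoints generally tolerate swapping one for another.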
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
def lowercase__ ( lowercase_ ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(lowercase_ ,(list, tuple) ) and isinstance(videos[0] ,(list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowercase_ ,(list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowercase_ ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = ["pixel_values"]
def __init__( self : List[str] , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : List[Any] , ) -> None:
super().__init__(**__a )
_UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 256}
_UpperCamelCase : List[Any] = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : int = crop_size if crop_size is not None else {"height": 224, "width": 224}
_UpperCamelCase : Optional[Any] = get_size_dict(__a , param_name="crop_size" )
_UpperCamelCase : str = do_resize
_UpperCamelCase : Dict = size
_UpperCamelCase : int = do_center_crop
_UpperCamelCase : int = crop_size
_UpperCamelCase : Optional[Any] = resample
_UpperCamelCase : Dict = do_rescale
_UpperCamelCase : Any = rescale_factor
_UpperCamelCase : Any = offset
_UpperCamelCase : Union[str, Any] = do_normalize
_UpperCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __SCREAMING_SNAKE_CASE ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray:
_UpperCamelCase : Any = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" in size:
_UpperCamelCase : str = get_resize_output_image_size(__a , size["shortest_edge"] , default_to_square=__a )
elif "height" in size and "width" in size:
_UpperCamelCase : Any = (size["height"], size["width"])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] , ) -> np.ndarray:
_UpperCamelCase : List[Any] = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Union[int, float] , __a : bool = True , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> Optional[Any]:
_UpperCamelCase : Any = image.astype(np.floataa )
if offset:
_UpperCamelCase : Dict = image - (scale / 2)
return rescale(__a , scale=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ) -> np.ndarray:
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
_UpperCamelCase : Optional[Any] = to_numpy_array(__a )
if do_resize:
_UpperCamelCase : Any = self.resize(image=__a , size=__a , resample=__a )
if do_center_crop:
_UpperCamelCase : Dict = self.center_crop(__a , size=__a )
if do_rescale:
_UpperCamelCase : Union[str, Any] = self.rescale(image=__a , scale=__a , offset=__a )
if do_normalize:
_UpperCamelCase : int = self.normalize(image=__a , mean=__a , std=__a )
_UpperCamelCase : str = to_channel_dimension_format(__a , __a )
return image
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image:
_UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase : Optional[int] = resample if resample is not None else self.resample
_UpperCamelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase : str = offset if offset is not None else self.offset
_UpperCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase : Tuple = image_std if image_std is not None else self.image_std
_UpperCamelCase : int = size if size is not None else self.size
_UpperCamelCase : Tuple = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase : Optional[int] = get_size_dict(__a , param_name="crop_size" )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
_UpperCamelCase : Union[str, Any] = make_batched(__a )
_UpperCamelCase : Optional[Any] = [
[
self._preprocess_image(
image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , )
for img in video
]
for video in videos
]
_UpperCamelCase : List[Any] = {"pixel_values": videos}
return BatchFeature(data=__a , tensor_type=__a )
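
# A minimal usage sketch for the processor above, with hypothetical frame shapes for
# illustration (8 RGB frames of a single video as uint8 numpy arrays):
#
#     import numpy as np
#
#     video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#     processor = VivitImageProcessor()
#     batch = processor.preprocess(video, return_tensors="np")
#     # expected pixel_values layout: (num_videos, num_frames, channels, 224, 224)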
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
torch.manual_seed(0 )
_UpperCamelCase : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = self.dummy_uncond_unet
_UpperCamelCase : List[Any] = PNDMScheduler()
_UpperCamelCase : Any = PNDMPipeline(unet=__a , scheduler=__a )
pndm.to(__a )
pndm.set_progress_bar_config(disable=__a )
_UpperCamelCase : Optional[int] = torch.manual_seed(0 )
_UpperCamelCase : str = pndm(generator=__a , num_inference_steps=20 , output_type="numpy" ).images
_UpperCamelCase : int = torch.manual_seed(0 )
_UpperCamelCase : Union[str, Any] = pndm(generator=__a , num_inference_steps=20 , output_type="numpy" , return_dict=__a )[0]
_UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCamelCase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase : Optional[int] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
_UpperCamelCase : Optional[int] = "google/ddpm-cifar10-32"
_UpperCamelCase : Optional[int] = UNetaDModel.from_pretrained(__a )
_UpperCamelCase : Optional[Any] = PNDMScheduler()
_UpperCamelCase : str = PNDMPipeline(unet=__a , scheduler=__a )
pndm.to(__a )
pndm.set_progress_bar_config(disable=__a )
_UpperCamelCase : Union[str, Any] = torch.manual_seed(0 )
_UpperCamelCase : Tuple = pndm(generator=__a , output_type="numpy" ).images
_UpperCamelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase : str = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
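
# Both tests seed torch.manual_seed(0) before sampling so the stochastic pipeline is
# reproducible, and expected_slice pins a 3x3 corner of the output as a regression
# fingerprint. A minimal sampling sketch outside the test harness:
#
#     pipe = PNDMPipeline(unet=UNet2DModel.from_pretrained("google/ddpm-cifar10-32"),
#                         scheduler=PNDMScheduler())
#     image = pipe(generator=torch.manual_seed(0), output_type="numpy").images[0]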
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
lowerCamelCase__ = True
except ImportError:
lowerCamelCase__ = False
try:
from torch.hub import _get_torch_home
lowerCamelCase__ = _get_torch_home()
except ImportError:
lowerCamelCase__ = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
lowerCamelCase__ = os.path.join(torch_cache_home, "transformers")
lowerCamelCase__ = "https://cdn.huggingface.co"
lowerCamelCase__ = "https://s3.amazonaws.com/models.huggingface.co/bert"
lowerCamelCase__ = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
lowerCamelCase__ = os.path.join(PATH, "config.yaml")
lowerCamelCase__ = os.path.join(PATH, "attributes.txt")
lowerCamelCase__ = os.path.join(PATH, "objects.txt")
lowerCamelCase__ = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
lowerCamelCase__ = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
lowerCamelCase__ = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
lowerCamelCase__ = "pytorch_model.bin"
lowerCamelCase__ = "config.yaml"
def lowercase__ ( lowercase_=OBJECTS ,lowercase_=ATTRIBUTES ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = []
with open(lowercase_ ) as f:
for object in f.readlines():
vg_classes.append(object.split("," )[0].lower().strip() )
_UpperCamelCase : Any = []
with open(lowercase_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split("," )[0].lower().strip() )
return vg_classes, vg_attrs
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = OrderedDict()
with open(lowercase_ ,"rb" ) as f:
_UpperCamelCase : List[str] = pkl.load(lowercase_ )["model"]
for k in copy.deepcopy(list(ckp.keys() ) ):
_UpperCamelCase : List[str] = ckp.pop(lowercase_ )
if isinstance(lowercase_ ,np.ndarray ):
_UpperCamelCase : List[Any] = torch.tensor(lowercase_ )
else:
assert isinstance(lowercase_ ,torch.tensor ), type(lowercase_ )
_UpperCamelCase : Optional[Any] = v
return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.upper()] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if i == last_level:  # compare the index, not the key string
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
# quick compare tensors
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    # intentionally raises even on success so the caller notices the check ran
    raise Exception("tensors are all good")


# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"


def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path


def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=False ,lowercase_=False ,) -> str:
"""simple docstring"""
if cache_dir is None:
_UpperCamelCase : List[Any] = TRANSFORMERS_CACHE
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : str = str(lowercase_ )
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : str = str(lowercase_ )
if is_remote_url(lowercase_ ):
# URL, so get it from the cache (downloading if necessary)
_UpperCamelCase : Union[str, Any] = get_from_cache(
lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,proxies=lowercase_ ,resume_download=lowercase_ ,user_agent=lowercase_ ,local_files_only=lowercase_ ,)
elif os.path.exists(lowercase_ ):
# File, and it exists.
_UpperCamelCase : List[str] = url_or_filename
elif urlparse(lowercase_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(lowercase_ ) )
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(lowercase_ ) )
if extract_compressed_file:
if not is_zipfile(lowercase_ ) and not tarfile.is_tarfile(lowercase_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_UpperCamelCase, _UpperCamelCase : Any = os.path.split(lowercase_ )
_UpperCamelCase : Optional[int] = output_file.replace("." ,"-" ) + "-extracted"
_UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ )
if os.path.isdir(lowercase_ ) and os.listdir(lowercase_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_UpperCamelCase : Optional[int] = output_path + ".lock"
with FileLock(lowercase_ ):
shutil.rmtree(lowercase_ ,ignore_errors=lowercase_ )
os.makedirs(lowercase_ )
if is_zipfile(lowercase_ ):
with ZipFile(lowercase_ ,"r" ) as zip_file:
zip_file.extractall(lowercase_ )
zip_file.close()
elif tarfile.is_tarfile(lowercase_ ):
_UpperCamelCase : int = tarfile.open(lowercase_ )
tar_file.extractall(lowercase_ )
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(lowercase_ ) )
return output_path_extracted
return output_path
def lowercase__ ( lowercase_ ,lowercase_="," ) -> Optional[int]:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
with open(lowercase_ ) as f:
_UpperCamelCase : Tuple = eval(f.read() )
else:
_UpperCamelCase : str = requests.get(lowercase_ )
try:
_UpperCamelCase : Optional[int] = requests.json()
except Exception:
_UpperCamelCase : Union[str, Any] = req.content.decode()
assert data is not None, "could not connect"
try:
_UpperCamelCase : List[Any] = eval(lowercase_ )
except Exception:
_UpperCamelCase : int = data.split("\n" )
req.close()
return data
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : List[Any] = requests.get(lowercase_ )
_UpperCamelCase : Optional[int] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : List[Any] = url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowercase_ )
with open(lowercase_ ,"rb" ) as stream:
_UpperCamelCase : Union[str, Any] = pkl.load(lowercase_ )
_UpperCamelCase : Union[str, Any] = weights.pop("model" )
_UpperCamelCase : Optional[int] = {}
for k, v in model.items():
_UpperCamelCase : str = torch.from_numpy(lowercase_ )
if "running_var" in k:
_UpperCamelCase : List[Any] = torch.tensor([0] )
_UpperCamelCase : str = k.replace("running_var" ,"num_batches_tracked" )
_UpperCamelCase : Any = zero
return new
def lowercase__ ( ) -> Dict:
"""simple docstring"""
print(F'''{os.path.abspath(os.path.join(lowercase_ ,os.pardir ) )}/demo.ipynb''' )
def lowercase__ ( lowercase_ ,lowercase_="RGB" ) -> int:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : Optional[Any] = cva.imread(lowercase_ )
else:
_UpperCamelCase : Optional[int] = get_image_from_url(lowercase_ )
assert img is not None, F'''could not connect to: {im}'''
_UpperCamelCase : Optional[int] = cva.cvtColor(lowercase_ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
_UpperCamelCase : List[Any] = img[:, :, ::-1]
return img
def lowercase__ ( lowercase_ ,lowercase_=1 ) -> List[Any]:
"""simple docstring"""
return (images[i : i + batch] for i in range(0 ,len(lowercase_ ) ,lowercase_ ))
| 310
| 0
|
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = None
SCREAMING_SNAKE_CASE__ :int = BloomTokenizerFast
SCREAMING_SNAKE_CASE__ :Tuple = BloomTokenizerFast
SCREAMING_SNAKE_CASE__ :Dict = True
SCREAMING_SNAKE_CASE__ :Dict = False
SCREAMING_SNAKE_CASE__ :Optional[int] = "tokenizer_file"
SCREAMING_SNAKE_CASE__ :int = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
super().setUp()
_UpperCamelCase : int = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : str , **__a : Tuple ) -> List[str]:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Any:
_UpperCamelCase : int = self.get_rust_tokenizer()
_UpperCamelCase : List[Any] = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
_UpperCamelCase : int = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
_UpperCamelCase : List[str] = tokenizer.batch_encode_plus(__a )["input_ids"]
self.assertListEqual(__a , __a )
_UpperCamelCase : List[Any] = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[int]=6 ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
_UpperCamelCase : str = "This is a simple input"
_UpperCamelCase : Union[str, Any] = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Tuple = ("This is a simple input", "This is a pair")
_UpperCamelCase : List[Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(__a , max_length=__a )
tokenizer_r.encode_plus(__a , max_length=__a )
tokenizer_r.batch_encode_plus(__a , max_length=__a )
tokenizer_r.encode(__a , max_length=__a )
tokenizer_r.batch_encode_plus(__a , max_length=__a )
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding" )
_UpperCamelCase : Tuple = None # Hotfixing padding = None
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Dict = self.get_rust_tokenizer()
_UpperCamelCase : Optional[Any] = load_dataset("xnli" , "all_languages" , split="test" , streaming=__a )
_UpperCamelCase : List[str] = next(iter(__a ) )["premise"]  # pick one sample
_UpperCamelCase : List[str] = list(sample_data.values() )
_UpperCamelCase : Any = list(map(tokenizer.encode , __a ) )
_UpperCamelCase : List[Any] = [tokenizer.decode(__a , clean_up_tokenization_spaces=__a ) for x in output_tokens]
self.assertListEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
# The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not
# impose any sequence length constraint. The parent class's test would fail since it relies on
# the maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 369
|
"""simple docstring"""
import torch
from transformers import AutoModel
class __SCREAMING_SNAKE_CASE ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple="sayef/fsner-bert-base-uncased" ) -> Dict:
super().__init__()
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained(__a , return_dict=__a )
_UpperCamelCase : str = torch.nn.CosineSimilarity(3 , 1e-0_8 )
_UpperCamelCase : List[str] = torch.nn.Softmax(dim=1 )
def __SCREAMING_SNAKE_CASE ( self : int , **__a : Tuple ) -> Optional[Any]:
return self.bert(**__a ).last_hidden_state
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__a )
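# The scorer below returns softmaxed, temperature-scaled cosine similarities between query
# and support token embeddings.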
def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : List[Any] , __a : Tuple=1 ) -> List[Any]:
return self.softmax(T * self.cos(__a , __a ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : Dict ) -> Union[str, Any]:
_UpperCamelCase : str = W_supports["sizes"].tolist()
_UpperCamelCase : Any = W_supports["start_token_id"].item()
_UpperCamelCase : Optional[Any] = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_UpperCamelCase : str = self.BERT(**__a )
_UpperCamelCase : int = self.BERT(**__a )
_UpperCamelCase : int = None
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : List[Any] = W_supports["input_ids"] == start_token_id
_UpperCamelCase : Optional[int] = W_supports["input_ids"] == end_token_id
for i, size in enumerate(__a ):
if i == 0:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Any = support_sizes[i - 1]
_UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]]
_UpperCamelCase : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
_UpperCamelCase : List[Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_UpperCamelCase : Any = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_UpperCamelCase : Any = torch.vstack((p_starts, p_start) )
_UpperCamelCase : Any = torch.vstack((p_ends, p_end) )
else:
_UpperCamelCase : Optional[Any] = p_start
_UpperCamelCase : str = p_end
return p_starts, p_ends
| 310
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = "wav2vec2"
def __init__( self : str , __a : str=32 , __a : int=768 , __a : str=12 , __a : List[Any]=12 , __a : List[Any]=3072 , __a : Optional[Any]="gelu" , __a : Optional[int]=0.1 , __a : Any=0.1 , __a : Optional[int]=0.1 , __a : List[Any]=0.0 , __a : Optional[Any]=0.0 , __a : str=0.1 , __a : List[str]=0.1 , __a : Any=0.02 , __a : Optional[int]=1e-5 , __a : List[str]="group" , __a : str="gelu" , __a : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , __a : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __a : Dict=(10, 3, 3, 3, 3, 2, 2) , __a : Optional[int]=False , __a : List[Any]=128 , __a : List[str]=16 , __a : str=False , __a : Optional[Any]=True , __a : Union[str, Any]=0.05 , __a : Dict=10 , __a : Tuple=2 , __a : Any=0.0 , __a : Optional[Any]=10 , __a : Union[str, Any]=0 , __a : List[Any]=320 , __a : Any=2 , __a : List[Any]=0.1 , __a : Dict=100 , __a : Any=256 , __a : Optional[Any]=256 , __a : str=0.1 , __a : Any="sum" , __a : Optional[Any]=False , __a : int=False , __a : int=256 , __a : Dict=(512, 512, 512, 512, 1500) , __a : Optional[Any]=(5, 3, 3, 1, 1) , __a : List[Any]=(1, 2, 3, 1, 1) , __a : List[str]=512 , __a : List[Any]=0 , __a : Tuple=1 , __a : int=2 , __a : List[Any]=False , __a : List[str]=3 , __a : Dict=2 , __a : str=3 , __a : Tuple=None , __a : Tuple=None , **__a : Dict , ) -> List[Any]:
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : Optional[Any] = feat_extract_norm
_UpperCamelCase : Optional[int] = feat_extract_activation
_UpperCamelCase : Optional[int] = list(__a )
_UpperCamelCase : str = list(__a )
_UpperCamelCase : List[Any] = list(__a )
_UpperCamelCase : List[Any] = conv_bias
_UpperCamelCase : str = num_conv_pos_embeddings
_UpperCamelCase : Any = num_conv_pos_embedding_groups
_UpperCamelCase : Union[str, Any] = len(self.conv_dim )
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Optional[int] = num_attention_heads
_UpperCamelCase : Tuple = hidden_dropout
_UpperCamelCase : Union[str, Any] = attention_dropout
_UpperCamelCase : Dict = activation_dropout
_UpperCamelCase : Any = feat_proj_dropout
_UpperCamelCase : List[Any] = final_dropout
_UpperCamelCase : Dict = layerdrop
_UpperCamelCase : Union[str, Any] = layer_norm_eps
_UpperCamelCase : Dict = initializer_range
_UpperCamelCase : Optional[int] = vocab_size
_UpperCamelCase : int = do_stable_layer_norm
_UpperCamelCase : Any = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase : Optional[int] = apply_spec_augment
_UpperCamelCase : List[str] = mask_time_prob
_UpperCamelCase : List[Any] = mask_time_length
_UpperCamelCase : Dict = mask_time_min_masks
_UpperCamelCase : str = mask_feature_prob
_UpperCamelCase : Optional[Any] = mask_feature_length
_UpperCamelCase : Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_UpperCamelCase : Dict = num_codevectors_per_group
_UpperCamelCase : List[str] = num_codevector_groups
_UpperCamelCase : Union[str, Any] = contrastive_logits_temperature
_UpperCamelCase : List[Any] = feat_quantizer_dropout
_UpperCamelCase : Dict = num_negatives
_UpperCamelCase : Optional[int] = codevector_dim
_UpperCamelCase : Union[str, Any] = proj_codevector_dim
_UpperCamelCase : Tuple = diversity_loss_weight
# ctc loss
_UpperCamelCase : Union[str, Any] = ctc_loss_reduction
_UpperCamelCase : List[Any] = ctc_zero_infinity
# adapter
_UpperCamelCase : str = add_adapter
_UpperCamelCase : Optional[Any] = adapter_kernel_size
_UpperCamelCase : Optional[int] = adapter_stride
_UpperCamelCase : int = num_adapter_layers
_UpperCamelCase : List[Any] = output_hidden_size or hidden_size
_UpperCamelCase : Dict = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCamelCase : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCamelCase : Union[str, Any] = list(__a )
_UpperCamelCase : Any = list(__a )
_UpperCamelCase : Dict = list(__a )
_UpperCamelCase : Tuple = xvector_output_dim
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
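# Total stride of the feature encoder: the product of all convolutional strides.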
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 370
|
"""simple docstring"""
from typing import Any
def lowercase__ ( lowercase_ ) -> list[Any]:
"""simple docstring"""
if not input_list:
return []
_UpperCamelCase : Dict = [input_list.count(lowercase_ ) for value in input_list]
_UpperCamelCase : Union[str, Any] = max(lowercase_ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(lowercase_ ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
| 0
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Tuple = abs(lowercase_ )
_UpperCamelCase : Any = 0
while n > 0:
res += n % 10
n //= 10
return res
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : int = abs(lowercase_ )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
return sum(int(lowercase_ ) for c in str(abs(lowercase_ ) ) )
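# The benchmark below times the iterative, recursive, and str()-based implementations on
# progressively larger inputs.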
def lowercase__ ( ) -> None:
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowercase_ ,lowercase_ ) -> None:
_UpperCamelCase : Union[str, Any] = F'''{func.__name__}({value})'''
_UpperCamelCase : Dict = timeit(F'''__main__.{call}''' ,setup="import __main__" )
print(F'''{call:56} = {func(lowercase_ )} -- {timing:.4f} seconds''' )
for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(lowercase_ ,lowercase_ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 371
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = "rag"
SCREAMING_SNAKE_CASE__ :List[str] = True
def __init__( self : List[Any] , __a : Optional[Any]=None , __a : str=True , __a : Tuple=None , __a : Dict=None , __a : Optional[int]=None , __a : Optional[int]=None , __a : List[Any]=None , __a : Dict=" / " , __a : int=" // " , __a : Optional[Any]=5 , __a : Dict=300 , __a : Optional[int]=768 , __a : Tuple=8 , __a : Union[str, Any]="wiki_dpr" , __a : Dict="train" , __a : List[Any]="compressed" , __a : str=None , __a : Tuple=None , __a : int=False , __a : str=False , __a : Optional[int]=0.0 , __a : Dict=True , __a : Tuple=False , __a : Dict=False , __a : str=False , __a : str=True , __a : Optional[Any]=None , **__a : Tuple , ) -> Any:
super().__init__(
bos_token_id=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , is_encoder_decoder=__a , prefix=__a , vocab_size=__a , **__a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_UpperCamelCase : Optional[int] = kwargs.pop("question_encoder" )
_UpperCamelCase : str = question_encoder_config.pop("model_type" )
_UpperCamelCase : Tuple = kwargs.pop("generator" )
_UpperCamelCase : str = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
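# Rebuild the two sub-configs from their dicts via AutoConfig, dispatching on model_type.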
_UpperCamelCase : Union[str, Any] = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : str = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : Optional[int] = reduce_loss
_UpperCamelCase : str = label_smoothing
_UpperCamelCase : int = exclude_bos_score
_UpperCamelCase : List[str] = do_marginalize
_UpperCamelCase : Optional[int] = title_sep
_UpperCamelCase : Optional[int] = doc_sep
_UpperCamelCase : Union[str, Any] = n_docs
_UpperCamelCase : Tuple = max_combined_length
_UpperCamelCase : Union[str, Any] = dataset
_UpperCamelCase : Any = dataset_split
_UpperCamelCase : List[str] = index_name
_UpperCamelCase : int = retrieval_vector_size
_UpperCamelCase : str = retrieval_batch_size
_UpperCamelCase : Dict = passages_path
_UpperCamelCase : str = index_path
_UpperCamelCase : Tuple = use_dummy_dataset
_UpperCamelCase : Union[str, Any] = output_retrieved
_UpperCamelCase : Optional[Any] = do_deduplication
_UpperCamelCase : str = use_cache
if self.forced_eos_token_id is None:
_UpperCamelCase : List[str] = getattr(self.generator , "forced_eos_token_id" , __a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Optional[int] ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
_UpperCamelCase : Dict = copy.deepcopy(self.__dict__ )
_UpperCamelCase : List[Any] = self.question_encoder.to_dict()
_UpperCamelCase : Tuple = self.generator.to_dict()
_UpperCamelCase : Any = self.__class__.model_type
return output
| 310
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 350
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : List[Any] , __a : str=13 , __a : Any=30 , __a : List[str]=2 , __a : Dict=3 , __a : Union[str, Any]=True , __a : Dict=True , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : str=0.1 , __a : Optional[int]=0.1 , __a : Union[str, Any]=10 , __a : Optional[Any]=0.02 , __a : List[Any]=None , __a : str=2 , ) -> int:
_UpperCamelCase : Tuple = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : List[str] = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : int = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = type_sequence_label_size
_UpperCamelCase : int = initializer_range
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Any = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2
_UpperCamelCase : Optional[int] = num_patches + 1
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Union[str, Any] = None
if self.use_labels:
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : Any = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = ViTModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Optional[int] , __a : int ) -> Optional[int]:
_UpperCamelCase : Tuple = ViTForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCamelCase : Union[str, Any] = 1
_UpperCamelCase : Union[str, Any] = ViTForMaskedImageModeling(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : Dict = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple , __a : int , __a : Dict ) -> int:
_UpperCamelCase : Any = self.type_sequence_label_size
_UpperCamelCase : Optional[Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : int = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Union[str, Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : List[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
_UpperCamelCase : Dict = self.prepare_config_and_inputs()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Union[str, Any] = config_and_inputs
_UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ :Any = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :str = True
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase : Dict = ViTModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
pass
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase, _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(__a )
_UpperCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[str] = [*signature.parameters.keys()]
_UpperCamelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : List[str] = ViTModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : List[Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__a )
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[Any] = prepare_img()
_UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : Dict = model(**__a )
# verify the logits
_UpperCamelCase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
_UpperCamelCase : str = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# which allows interpolating the pre-trained position embeddings so the model can be
# used on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher-resolution images.
_UpperCamelCase : List[str] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__a )
_UpperCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : str = model(__a , interpolate_pos_encoding=__a )
# verify the logits
_UpperCamelCase : int = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , __a )
_UpperCamelCase : int = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
_UpperCamelCase : int = self.default_image_processor
_UpperCamelCase : Dict = prepare_img()
_UpperCamelCase : Union[str, Any] = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCamelCase : int = model(__a )
| 310
| 0
|
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = "The Nymphenburg Palace is a beautiful palace in Munich!"
def lowercase__ ( lowercase_ ,lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1_024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1_024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
_UpperCamelCase : Any = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
_UpperCamelCase : int = BERTEncoder(
attention_cell=predefined_args["attention_cell"] ,num_layers=predefined_args["num_layers"] ,units=predefined_args["units"] ,hidden_size=predefined_args["hidden_size"] ,max_length=predefined_args["max_length"] ,num_heads=predefined_args["num_heads"] ,scaled=predefined_args["scaled"] ,dropout=predefined_args["dropout"] ,output_attention=lowercase_ ,output_all_encodings=lowercase_ ,use_residual=predefined_args["use_residual"] ,activation=predefined_args.get("activation" ,"gelu" ) ,layer_norm_eps=predefined_args.get("layer_norm_eps" ,lowercase_ ) ,)
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
_UpperCamelCase : int = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
_UpperCamelCase : Union[str, Any] = os.path.join(get_home_dir() ,"models" )
_UpperCamelCase : Optional[Any] = _load_vocab(lowercase_ ,lowercase_ ,lowercase_ ,cls=lowercase_ )
_UpperCamelCase : Any = nlp.model.BERTModel(
lowercase_ ,len(lowercase_ ) ,units=predefined_args["units"] ,embed_size=predefined_args["embed_size"] ,embed_dropout=predefined_args["embed_dropout"] ,word_embed=predefined_args["word_embed"] ,use_pooler=lowercase_ ,use_token_type_embed=lowercase_ ,token_type_vocab_size=predefined_args["token_type_vocab_size"] ,use_classifier=lowercase_ ,use_decoder=lowercase_ ,)
original_bort.load_parameters(lowercase_ ,cast_dtype=lowercase_ ,ignore_extra=lowercase_ )
_UpperCamelCase : Dict = original_bort._collect_params_with_prefix()
# Build our config 🤗
_UpperCamelCase : Optional[Any] = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(lowercase_ ),
}
_UpperCamelCase : int = BertConfig.from_dict(lowercase_ )
_UpperCamelCase : Dict = BertForMaskedLM(lowercase_ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(lowercase_ ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(lowercase_ ,lowercase_ ):
_UpperCamelCase : Optional[Any] = hf_param.shape
_UpperCamelCase : Dict = to_torch(params[gluon_param] )
_UpperCamelCase : int = gluon_param.shape
assert (
shape_hf == shape_gluon
), F'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
_UpperCamelCase : List[Any] = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight ,"word_embed.0.weight" )
_UpperCamelCase : str = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight ,"encoder.position_weight" )
_UpperCamelCase : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias ,"encoder.layer_norm.beta" )
_UpperCamelCase : str = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight ,"encoder.layer_norm.gamma" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
_UpperCamelCase : str = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
_UpperCamelCase : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
_UpperCamelCase : BertSelfAttention = layer.attention.self
_UpperCamelCase : str = check_and_map_params(
self_attn.key.bias.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
_UpperCamelCase : List[str] = check_and_map_params(
self_attn.key.weight.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
_UpperCamelCase : Union[str, Any] = check_and_map_params(
self_attn.query.bias.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
_UpperCamelCase : str = check_and_map_params(
self_attn.query.weight.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
_UpperCamelCase : Any = check_and_map_params(
self_attn.value.bias.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
_UpperCamelCase : List[str] = check_and_map_params(
self_attn.value.weight.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
_UpperCamelCase : BertSelfOutput = layer.attention.output
_UpperCamelCase : Optional[Any] = check_and_map_params(
self_output.dense.bias ,F'''encoder.transformer_cells.{i}.proj.bias''' )
_UpperCamelCase : Union[str, Any] = check_and_map_params(
self_output.dense.weight ,F'''encoder.transformer_cells.{i}.proj.weight''' )
_UpperCamelCase : int = check_and_map_params(
self_output.LayerNorm.bias ,F'''encoder.transformer_cells.{i}.layer_norm.beta''' )
_UpperCamelCase : Dict = check_and_map_params(
self_output.LayerNorm.weight ,F'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
_UpperCamelCase : BertIntermediate = layer.intermediate
_UpperCamelCase : int = check_and_map_params(
intermediate.dense.bias ,F'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
_UpperCamelCase : List[str] = check_and_map_params(
intermediate.dense.weight ,F'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
_UpperCamelCase : BertOutput = layer.output
_UpperCamelCase : Union[str, Any] = check_and_map_params(
bert_output.dense.bias ,F'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
_UpperCamelCase : Optional[Any] = check_and_map_params(
bert_output.dense.weight ,F'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
_UpperCamelCase : Tuple = check_and_map_params(
bert_output.LayerNorm.bias ,F'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
_UpperCamelCase : str = check_and_map_params(
bert_output.LayerNorm.weight ,F'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
_UpperCamelCase : Dict = RobertaTokenizer.from_pretrained("roberta-base" )
_UpperCamelCase : str = tokenizer.encode_plus(lowercase_ )["input_ids"]
# Get gluon output
_UpperCamelCase : Optional[Any] = mx.nd.array([input_ids] )
_UpperCamelCase : Optional[int] = original_bort(inputs=lowercase_ ,token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowercase_ )
_UpperCamelCase : List[Any] = BertModel.from_pretrained(lowercase_ )
hf_bort_model.eval()
_UpperCamelCase : Optional[Any] = tokenizer.encode_plus(lowercase_ ,return_tensors="pt" )
_UpperCamelCase : Optional[int] = hf_bort_model(**lowercase_ )[0]
_UpperCamelCase : int = output_gluon[0].asnumpy()
_UpperCamelCase : int = output_hf[0].detach().numpy()
_UpperCamelCase : Tuple = np.max(np.abs(hf_layer - gluon_layer ) ).item()
_UpperCamelCase : List[str] = np.allclose(lowercase_ ,lowercase_ ,atol=1e-3 )
if success:
print("✔️ Both model do output the same tensors" )
else:
print("❌ Both model do **NOT** output the same tensors" )
print("Absolute difference is:" ,lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase__ = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 351
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[int] = -1
_UpperCamelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Any = TextStreamer(__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Optional[int] = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Dict = -1
_UpperCamelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : List[str] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[int] = tokenizer.decode(greedy_ids[0] )
_UpperCamelCase : Tuple = TextIteratorStreamer(__a )
_UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : Optional[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
_UpperCamelCase : Tuple = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Union[str, Any] = -1
_UpperCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : str = greedy_ids[:, input_ids.shape[1] :]
_UpperCamelCase : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Optional[int] = TextStreamer(__a , skip_prompt=__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Tuple = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("distilgpt2" )
_UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a )
_UpperCamelCase : int = -1
_UpperCamelCase : Any = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCamelCase : List[str] = TextStreamer(__a , skip_special_tokens=__a )
model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCamelCase : int = cs.out[:-1] # Remove the final "\n"
_UpperCamelCase : int = tokenizer(__a , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[Any] = -1
_UpperCamelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Any = TextIteratorStreamer(__a , timeout=0.0_01 )
_UpperCamelCase : Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : List[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__a ):
_UpperCamelCase : List[str] = ""
for new_text in streamer:
streamer_text += new_text
| 310
| 0
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowercase__ ( lowercase_ ) -> list[list[float]]:
"""simple docstring"""
_UpperCamelCase : int = Decimal
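# Alias Decimal as d; determinants and cofactors are computed in Decimal to limit
# floating-point error before converting back to float.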
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(lowercase_ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
_UpperCamelCase : Tuple = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creates a copy of the matrix with swapped positions of the elements
_UpperCamelCase : Union[str, Any] = [[0.0, 0.0], [0.0, 0.0]]
_UpperCamelCase : Optional[int] = matrix[1][1], matrix[0][0]
_UpperCamelCase : Any = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(lowercase_ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(lowercase_ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
_UpperCamelCase : Optional[int] = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creating cofactor matrix
_UpperCamelCase : Union[str, Any] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
_UpperCamelCase : str = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
_UpperCamelCase : Any = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
_UpperCamelCase : Tuple = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
_UpperCamelCase : int = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
_UpperCamelCase : Optional[int] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
_UpperCamelCase : str = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
_UpperCamelCase : Dict = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
_UpperCamelCase : Optional[int] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
_UpperCamelCase : str = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
_UpperCamelCase : str = array(lowercase_ )
for i in range(3 ):
for j in range(3 ):
_UpperCamelCase : Union[str, Any] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
_UpperCamelCase : Tuple = array(lowercase_ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(lowercase_ )
# Calculate the inverse of the matrix
return [[float(d(lowercase_ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
| 352
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
with open(lowercase_ ) as metadata_file:
_UpperCamelCase : Dict = json.load(lowercase_ )
_UpperCamelCase : str = LukeConfig(use_entity_aware_attention=lowercase_ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
_UpperCamelCase : str = torch.load(lowercase_ ,map_location="cpu" )["module"]
# Load the entity vocab file
_UpperCamelCase : Dict = load_original_entity_vocab(lowercase_ )
# add an entry for [MASK2]
_UpperCamelCase : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCamelCase : Dict = AddedToken("<ent>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
_UpperCamelCase : Union[str, Any] = AddedToken("<ent2>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"r" ) as f:
_UpperCamelCase : Tuple = json.load(lowercase_ )
_UpperCamelCase : Optional[int] = "MLukeTokenizer"
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
with open(os.path.join(lowercase_ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
_UpperCamelCase : int = MLukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0]
_UpperCamelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0]
_UpperCamelCase : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"]
_UpperCamelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
_UpperCamelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCamelCase : Optional[Any] = state_dict[bias_name]
_UpperCamelCase : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCamelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCamelCase : Tuple = F'''encoder.layer.{layer_index}.attention.self.'''
_UpperCamelCase : List[Any] = state_dict[prefix + matrix_name]
_UpperCamelCase : str = state_dict[prefix + matrix_name]
_UpperCamelCase : Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCamelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
_UpperCamelCase : Tuple = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : int = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCamelCase : int = state_dict["entity_predictions.bias"]
_UpperCamelCase : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCamelCase : str = LukeForMaskedLM(config=lowercase_ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
_UpperCamelCase : List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
_UpperCamelCase : Union[str, Any] = state_dict[key]
else:
_UpperCamelCase : Dict = state_dict[key]
_UpperCamelCase, _UpperCamelCase : Optional[Any] = model.load_state_dict(lowercase_ ,strict=lowercase_ )
if set(lowercase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(lowercase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ,task="entity_classification" )
_UpperCamelCase : Dict = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
_UpperCamelCase : Optional[Any] = (0, 9)
_UpperCamelCase : int = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : List[str] = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 33, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 1, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ )
_UpperCamelCase : int = "Tokyo is the capital of <mask>."
_UpperCamelCase : List[Any] = (24, 30)
_UpperCamelCase : Any = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : Optional[Any] = model(**lowercase_ )
_UpperCamelCase : int = encoding["input_ids"][0].tolist()
_UpperCamelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
_UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase_ )
_UpperCamelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item()
_UpperCamelCase : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : List[str] = ["[MASK]", "[PAD]", "[UNK]"]
_UpperCamelCase : Tuple = [json.loads(lowercase_ ) for line in open(lowercase_ )]
_UpperCamelCase : List[str] = {}
for entry in data:
_UpperCamelCase : Any = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_UpperCamelCase : Dict = entity_id
break
_UpperCamelCase : Dict = F'''{language}:{entity_name}'''
_UpperCamelCase : str = entity_id
return new_mapping
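# The loader above expects a JSON-lines entity vocabulary. A sketch of the
# assumed record layout, inferred from the parsing code (each record carries
# an "id" and a list of [entity_name, language] pairs; the sample lines are
# illustrative):
#
#   {"id": 0, "entities": [["[PAD]", null]]}
#   {"id": 3, "entities": [["Japan", "en"], ["Japon", "fr"]]}
#
# Special tokens such as [PAD] and [MASK] are keyed as-is, while regular
# entries are remapped to "{language}:{entity_name}" keys, so both "en:Japan"
# and "fr:Japon" map to id 3.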
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCamelCase__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
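# Hedged example invocation of the converter (the script filename and all
# paths below are placeholders; the flags match the argparse definitions above):
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path mluke_base/pytorch_model.bin \
#       --metadata_path mluke_base/metadata.json \
#       --entity_vocab_path mluke_base/entity_vocab.jsonl \
#       --pytorch_dump_folder_path converted_mluke_base \
#       --model_size base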
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Dict , __a : VQModel , __a : UNetaDModel , __a : DDIMScheduler ) -> int:
super().__init__()
self.register_modules(vqvae=__a , unet=__a , scheduler=__a )
@torch.no_grad()
def __call__( self : Optional[int] , __a : int = 1 , __a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __a : float = 0.0 , __a : int = 50 , __a : Optional[str] = "pil" , __a : bool = True , **__a : int , ) -> Union[Tuple, ImagePipelineOutput]:
_UpperCamelCase : Dict = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__a , )
_UpperCamelCase : List[Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_UpperCamelCase : Union[str, Any] = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__a )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
_UpperCamelCase : List[str] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCamelCase : Optional[Any] = {}
if accepts_eta:
_UpperCamelCase : List[str] = eta
for t in self.progress_bar(self.scheduler.timesteps ):
_UpperCamelCase : str = self.scheduler.scale_model_input(__a , __a )
# predict the noise residual
_UpperCamelCase : Union[str, Any] = self.unet(__a , __a ).sample
# compute the previous noisy sample x_t -> x_t-1
_UpperCamelCase : Optional[int] = self.scheduler.step(__a , __a , __a , **__a ).prev_sample
# decode the image latents with the VAE
_UpperCamelCase : Dict = self.vqvae.decode(__a ).sample
_UpperCamelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCamelCase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCamelCase : List[Any] = self.numpy_to_pil(__a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a )
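# Minimal usage sketch for the pipeline above (a latent diffusion pipeline
# built from a VQModel, a UNet2DModel and a DDIMScheduler). The checkpoint id
# and keyword names are assumptions mirroring diffusers' LDMPipeline, since
# the signature above is obfuscated:
#
#   pipe = DiffusionPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("ldm_sample.png")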
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCamelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowerCamelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowerCamelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[List[List[str]]] , __a : List[List[str]] , __a : int = 1 , __a : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__a , hypotheses=__a , min_len=__a , max_len=__a )
}
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : List[Any] , __a : str=13 , __a : Any=30 , __a : List[str]=2 , __a : Dict=3 , __a : Union[str, Any]=True , __a : Dict=True , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : str=0.1 , __a : Optional[int]=0.1 , __a : Union[str, Any]=10 , __a : Optional[Any]=0.02 , __a : List[Any]=None , __a : str=2 , ) -> int:
_UpperCamelCase : Tuple = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : List[str] = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : int = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = type_sequence_label_size
_UpperCamelCase : int = initializer_range
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Any = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2
_UpperCamelCase : Optional[int] = num_patches + 1
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Union[str, Any] = None
if self.use_labels:
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : Any = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = ViTModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Optional[int] , __a : int ) -> Optional[int]:
_UpperCamelCase : Tuple = ViTForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCamelCase : Union[str, Any] = 1
_UpperCamelCase : Union[str, Any] = ViTForMaskedImageModeling(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : Dict = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple , __a : int , __a : Dict ) -> int:
_UpperCamelCase : Any = self.type_sequence_label_size
_UpperCamelCase : Optional[Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : int = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Union[str, Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : List[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
_UpperCamelCase : Dict = self.prepare_config_and_inputs()
(
_UpperCamelCase
) : Union[str, Any] = config_and_inputs
_UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ :Any = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :str = True
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase : Dict = ViTModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
pass
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(__a )
_UpperCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[str] = [*signature.parameters.keys()]
_UpperCamelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : List[str] = ViTModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : List[Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__a )
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[Any] = prepare_img()
_UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : Dict = model(**__a )
# verify the logits
_UpperCamelCase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
_UpperCamelCase : str = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# which interpolates the pre-trained position embeddings so the model can be
# used at higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher-resolution images.
_UpperCamelCase : List[str] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__a )
_UpperCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : str = model(__a , interpolate_pos_encoding=__a )
# verify the logits
_UpperCamelCase : int = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , __a )
_UpperCamelCase : int = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
_UpperCamelCase : int = self.default_image_processor
_UpperCamelCase : Dict = prepare_img()
_UpperCamelCase : Union[str, Any] = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCamelCase : int = model(__a )
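# Sketch of the position-embedding interpolation exercised in the test above:
# at size=480 with 8x8 patches, ViT sees (480 // 8) ** 2 + 1 = 3601 tokens and
# `interpolate_pos_encoding=True` resizes the pretrained position embeddings
# to match. Running this downloads the DINO checkpoint used by the test.
#
#   processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
#   model = ViTModel.from_pretrained("facebook/dino-vits8")
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   with torch.no_grad():
#       hidden = model(pixel_values, interpolate_pos_encoding=True).last_hidden_state
#   assert hidden.shape == (1, 3601, 384)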
"""simple docstring"""
from __future__ import annotations
from math import pi
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
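# Usage sketch, assuming the duplicated parameter names in the obfuscated
# signature above are restored to (inductance, frequency, reactance) and the
# function is named, say, ind_reactance. Exactly one argument must be 0 and
# the function solves for it via X_L = 2 * pi * f * L; values are illustrative:
#
#   ind_reactance(inductance=0.000_2, frequency=50, reactance=0)
#   # -> {"reactance": 2 * pi * 50 * 0.000_2}  (about 0.0628 ohms)
#   ind_reactance(inductance=0, frequency=50, reactance=0.0628)
#   # -> {"inductance": 0.0628 / (2 * pi * 50)}  (about 0.0002 henries)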
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
warnings.warn(
"Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
"be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , _UpperCamelCase , )
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowerCamelCase__ = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowerCamelCase__ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
if "://" in dataset_path:
_UpperCamelCase : List[Any] = dataset_path.split("://" )[1]
return dataset_path
def lowercase__ ( lowercase_ ) -> bool:
"""simple docstring"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = not is_remote_filesystem(lowercase_ )
if is_local:
# LocalFileSystem.mv does copy + rm, so it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowercase_ ) ,fs._strip_protocol(lowercase_ ) )
else:
fs.mv(lowercase_ ,lowercase_ ,recursive=lowercase_ )
def lowercase__ ( ) -> None:
"""simple docstring"""
if hasattr(fsspec.asyn ,"reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_UpperCamelCase : Dict = None
_UpperCamelCase : str = None
_UpperCamelCase : str = threading.Lock()
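# Usage sketch for the helpers above. The three module-level functions are all
# obfuscated to the same name here, so their original names
# (extract_path_from_uri, is_remote_filesystem) are assumed for illustration:
#
#   import fsspec
#   assert extract_path_from_uri("s3://bucket/datasets/squad") == "bucket/datasets/squad"
#   assert extract_path_from_uri("relative/path") == "relative/path"
#   assert not is_remote_filesystem(fsspec.filesystem("file"))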
"""simple docstring"""
from __future__ import annotations
import requests
lowerCamelCase__ = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def lowercase__ ( lowercase_ ,lowercase_ = 1 ,lowercase_ = "new" ,lowercase_ = None ) -> dict:
"""simple docstring"""
_UpperCamelCase : str = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(lowercase_ ) - valid_terms ) ):
_UpperCamelCase : List[Any] = F'''Invalid search term: {invalid_search_terms}'''
raise ValueError(lowercase_ )
_UpperCamelCase : List[Any] = requests.get(
F'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' ,headers={"User-agent": "A random string"} ,)
if response.status_code == 429:
raise requests.HTTPError
_UpperCamelCase : Optional[Any] = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(lowercase_ )}
_UpperCamelCase : List[str] = {}
for id_ in range(lowercase_ ):
_UpperCamelCase : Dict = {
item: data["data"]["children"][id_]["data"][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=__a , )
assert hasattr(self , "env" )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[str] ) -> Any:
_UpperCamelCase : Tuple = F'''{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}'''
# distributed data settings
_UpperCamelCase : List[str] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__a , instance_count=__a , instance_type=self.instance_type , debugger_hook_config=__a , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__a , py_version="py36" , )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Dict:
TrainingJobAnalytics(__a ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(2,)] )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : str ) -> Union[str, Any]:
# create estimator
_UpperCamelCase : Optional[Any] = self.create_estimator(__a )
# run training
estimator.fit()
# result dataframe
_UpperCamelCase : int = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_UpperCamelCase : Any = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
_UpperCamelCase : List[str] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from the SageMaker job; this includes starting, preprocessing and stopping time
_UpperCamelCase : Union[str, Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __a )
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
lowerCamelCase__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
lowerCamelCase__ = f"""https://www.google.com/search?q={query}&num=100"""
lowerCamelCase__ = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
lowerCamelCase__ = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
lowerCamelCase__ = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = "funnel"
SCREAMING_SNAKE_CASE__ :int = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self : Any , __a : Dict=3_0522 , __a : List[Any]=[4, 4, 4] , __a : str=None , __a : int=2 , __a : Union[str, Any]=768 , __a : Dict=12 , __a : Optional[int]=64 , __a : Optional[Any]=3072 , __a : Tuple="gelu_new" , __a : List[str]=0.1 , __a : List[str]=0.1 , __a : Optional[Any]=0.0 , __a : Optional[int]=0.1 , __a : Any=None , __a : str=1e-9 , __a : Union[str, Any]="mean" , __a : Optional[int]="relative_shift" , __a : Any=True , __a : str=True , __a : Union[str, Any]=True , **__a : Optional[int] , ) -> List[Any]:
_UpperCamelCase : Optional[int] = vocab_size
_UpperCamelCase : Union[str, Any] = block_sizes
_UpperCamelCase : Tuple = [1] * len(__a ) if block_repeats is None else block_repeats
assert len(__a ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
_UpperCamelCase : List[Any] = num_decoder_layers
_UpperCamelCase : Optional[Any] = d_model
_UpperCamelCase : Dict = n_head
_UpperCamelCase : str = d_head
_UpperCamelCase : str = d_inner
_UpperCamelCase : str = hidden_act
_UpperCamelCase : int = hidden_dropout
_UpperCamelCase : Any = attention_dropout
_UpperCamelCase : Tuple = activation_dropout
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : str = initializer_std
_UpperCamelCase : Any = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
_UpperCamelCase : Dict = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
_UpperCamelCase : Tuple = attention_type
_UpperCamelCase : Any = separate_cls
_UpperCamelCase : str = truncate_seq
_UpperCamelCase : Any = pool_q_only
super().__init__(**__a )
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
return sum(self.block_sizes )
@num_hidden_layers.setter
def __SCREAMING_SNAKE_CASE ( self : int , __a : Union[str, Any] ) -> Union[str, Any]:
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
return len(self.block_sizes )
@num_blocks.setter
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Dict ) -> int:
raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = "xlm-roberta-xl"
def __init__( self : Any , __a : Tuple=25_0880 , __a : Optional[Any]=2560 , __a : List[str]=36 , __a : Any=32 , __a : Dict=1_0240 , __a : Optional[Any]="gelu" , __a : int=0.1 , __a : Tuple=0.1 , __a : str=514 , __a : Any=1 , __a : List[Any]=0.02 , __a : List[str]=1e-0_5 , __a : Optional[Any]=1 , __a : List[Any]=0 , __a : Tuple=2 , __a : int="absolute" , __a : Dict=True , __a : Dict=None , **__a : Tuple , ) -> str:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : Optional[int] = hidden_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : Optional[int] = num_attention_heads
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Dict = max_position_embeddings
_UpperCamelCase : Optional[Any] = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Union[str, Any] = use_cache
_UpperCamelCase : Optional[Any] = classifier_dropout
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCamelCase : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCamelCase : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
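# Sketch of the dynamic-axis mapping the ONNX config above produces for any
# non-multiple-choice task (class and property names assumed; the second
# class subclasses OnnxConfig):
#
#   onnx_config = XLMRobertaXLOnnxConfig(XLMRobertaXLConfig())
#   print(onnx_config.inputs)
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#   #              ('attention_mask', {0: 'batch', 1: 'sequence'})])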
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Tuple , **__a : int ) -> List[Any]:
super().__init__(**__a )
if self.framework != "pt":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self : List[Any] , __a : Union[np.ndarray, bytes, str] , **__a : Tuple ) -> Any:
return super().__call__(__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , **__a : Union[str, Any] ) -> Dict:
_UpperCamelCase : Tuple = {}
if "candidate_labels" in kwargs:
_UpperCamelCase : Optional[int] = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
_UpperCamelCase : Optional[int] = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Any , __a : str=None , __a : Dict="This is a sound of {}." ) -> Tuple:
if isinstance(__a , __a ):
if audio.startswith("http://" ) or audio.startswith("https://" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
_UpperCamelCase : Optional[Any] = requests.get(__a ).content
else:
with open(__a , "rb" ) as f:
_UpperCamelCase : Tuple = f.read()
if isinstance(__a , __a ):
_UpperCamelCase : Union[str, Any] = ffmpeg_read(__a , self.feature_extractor.sampling_rate )
if not isinstance(__a , np.ndarray ):
raise ValueError("We expect a numpy ndarray as input" )
if len(audio.shape ) != 1:
raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline" )
_UpperCamelCase : Union[str, Any] = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="pt" )
_UpperCamelCase : Tuple = candidate_labels
_UpperCamelCase : int = [hypothesis_template.format(__a ) for x in candidate_labels]
_UpperCamelCase : Optional[Any] = self.tokenizer(__a , return_tensors=self.framework , padding=__a )
_UpperCamelCase : Optional[int] = [text_inputs]
return inputs
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : int ) -> List[Any]:
_UpperCamelCase : Optional[int] = model_inputs.pop("candidate_labels" )
_UpperCamelCase : str = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , __a ):
_UpperCamelCase : List[Any] = text_inputs[0]
else:
# Batching case.
_UpperCamelCase : int = text_inputs[0][0]
_UpperCamelCase : Union[str, Any] = self.model(**__a , **__a )
_UpperCamelCase : Dict = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_audio,
}
return model_outputs
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Union[str, Any]:
_UpperCamelCase : List[str] = model_outputs.pop("candidate_labels" )
_UpperCamelCase : int = model_outputs["logits"][0]
if self.framework == "pt":
_UpperCamelCase : Optional[Any] = logits.softmax(dim=0 )
_UpperCamelCase : Dict = probs.tolist()
else:
raise ValueError("`tf` framework not supported." )
_UpperCamelCase : Tuple = [
{"score": score, "label": candidate_label}
for score, candidate_label in sorted(zip(__a , __a ) , key=lambda __a : -x[0] )
]
return result
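# Usage sketch for the zero-shot audio classification pipeline above. The
# checkpoint id and file path are illustrative; candidate_labels and
# hypothesis_template are the keyword arguments consumed in the preprocessing
# steps above:
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier(
#       "dog_bark.wav",
#       candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"],
#       hypothesis_template="This is a sound of {}.",
#   )
#   # -> [{"score": ..., "label": ...}, ...] sorted by descending score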
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def __SCREAMING_SNAKE_CASE ( *__a : int , **__a : int ) -> List[Any]:
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , __a : Optional[int] , __a : str ) -> Optional[Any]:
_UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , image_processor=__a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : Union[str, Any] ) -> int:
_UpperCamelCase : Any = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(__a ) , 0 )
for detected_object in outputs:
self.assertEqual(
__a , {
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
} , )
import datasets
_UpperCamelCase : str = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
_UpperCamelCase : List[Any] = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
_UpperCamelCase : List[Any] = object_detector(__a , threshold=0.0 )
self.assertEqual(len(__a ) , len(__a ) )
for outputs in batch_outputs:
self.assertGreater(len(__a ) , 0 )
for detected_object in outputs:
self.assertEqual(
__a , {
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
pass
@require_torch
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[str] = "hf-internal-testing/tiny-detr-mobilenetsv3"
_UpperCamelCase : Optional[int] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
_UpperCamelCase : Any = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = "facebook/detr-resnet-50"
_UpperCamelCase : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : Union[str, Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
_UpperCamelCase : List[str] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : Dict = "facebook/detr-resnet-50"
_UpperCamelCase : Optional[Any] = pipeline("object-detection" , model=__a )
_UpperCamelCase : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
_UpperCamelCase : Tuple = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
_UpperCamelCase : Tuple = 0.99_85
_UpperCamelCase : List[Any] = "facebook/detr-resnet-50"
_UpperCamelCase : List[str] = pipeline("object-detection" , model=__a )
_UpperCamelCase : Any = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=__a )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = "Narsil/layoutlmv3-finetuned-funsd"
_UpperCamelCase : int = 0.99_93
_UpperCamelCase : str = pipeline("object-detection" , model=__a , threshold=__a )
_UpperCamelCase : Union[str, Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
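# For context, a hedged sketch of what `threshold` does in the calls above: after
# post-processing, the pipeline keeps only detections whose score clears the
# threshold (this helper is illustrative, not the pipeline's internal implementation).
def _filter_detections_by_score(detections: list, threshold: float) -> list:
    """Keep only detections whose ``score`` is at or above ``threshold``."""
    return [det for det in detections if det["score"] >= threshold]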
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = MgpstrTokenizer
SCREAMING_SNAKE_CASE__ :str = False
SCREAMING_SNAKE_CASE__ :Any = {}
SCREAMING_SNAKE_CASE__ :Tuple = False
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
super().setUp()
# fmt: off
_UpperCamelCase : Optional[Any] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_UpperCamelCase : Any = dict(zip(__a , range(len(__a ) ) ) )
_UpperCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__a : Optional[int] ) -> Any:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Tuple ) -> int:
_UpperCamelCase : List[str] = "tester"
_UpperCamelCase : int = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
pass
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
_UpperCamelCase : int = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : Dict = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
_UpperCamelCase : int = tokenizer.encode([special_token] , add_special_tokens=__a )
self.assertEqual(len(__a ) , 1 )
_UpperCamelCase : Union[str, Any] = tokenizer.decode(__a , skip_special_tokens=__a )
self.assertTrue(special_token not in decoded )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
_UpperCamelCase : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : Dict = self.get_input_output_texts(__a )
_UpperCamelCase : List[str] = tokenizer.tokenize(__a )
_UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(__a )
_UpperCamelCase : List[Any] = tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
_UpperCamelCase : List[str] = tokenizer.convert_ids_to_tokens(__a )
self.assertNotEqual(len(__a ) , 0 )
_UpperCamelCase : Union[str, Any] = tokenizer.decode(__a )
self.assertIsInstance(__a , __a )
self.assertEqual(text_a.replace(" " , "" ) , __a )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
pass
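# Note (hedged): MGP-STR tokenization is character-level over the 38-symbol vocab
# written in setUp, so e.g. "tester" is expected to map to the ids of
# ["t", "e", "s", "t", "e", "r"] with no merge rules involved.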
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCamelCase__ = {"UserAgent": UserAgent().random}
def lowercase__ ( lowercase_ ) -> dict:
"""simple docstring"""
_UpperCamelCase : str = script.contents[0]
_UpperCamelCase : Any = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : str ) -> Tuple:
_UpperCamelCase : List[str] = F'''https://www.instagram.com/{username}/'''
_UpperCamelCase : Optional[Any] = self.get_json()
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> dict:
_UpperCamelCase : int = requests.get(self.url , headers=__a ).text
_UpperCamelCase : Union[str, Any] = BeautifulSoup(__a , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : List[Any] ) -> str:
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self : str ) -> str:
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
return self.user_data["username"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self.user_data["full_name"]
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
return self.user_data["biography"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self.user_data["business_email"]
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.user_data["external_url"]
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
return self.user_data["is_verified"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool:
return self.user_data["is_private"]
def lowercase__ ( lowercase_ = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
_UpperCamelCase : Union[str, Any] = InstagramUser(lowercase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,lowercase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0  # WGS-84 semi-major (equatorial) axis, in meters
AXIS_B = 6356752.314245  # WGS-84 semi-minor (polar) axis, in meters
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(
    lat1: float, lon1: float, lat2: float, lon2: float
) -> float:
    """Approximate the surface distance between two points on an ellipsoidal Earth."""
    # Flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between the two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
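    # Illustrative check (coordinates approximate): San Francisco to Yosemite Valley.
    # Expect a result on the order of 250 km, returned in meters.
    SAN_FRANCISCO = (37.774856, -122.424227)
    YOSEMITE = (37.864742, -119.537521)
    print(f"{lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE):,.1f} meters")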
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peaking (bell) EQ biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
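# Hedged usage sketch: run a 440 Hz tone through the band-pass filter above. This
# assumes IIRFilter (from audio_filters.iir_filter) exposes a per-sample
# process(sample: float) -> float method.
if __name__ == "__main__":
    samplerate = 48_000
    band = make_bandpass(440, samplerate)
    # A sine at the centre frequency should pass through with little attenuation.
    tone = [sin(tau * 440 * n / samplerate) for n in range(1_000)]
    filtered = [band.process(sample) for sample in tone]
    print(f"peak output amplitude: {max(abs(s) for s in filtered):.3f}")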
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string to Base85 bytes."""
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(b85encoded: bytes) -> str:
    """Decode Base85 bytes back to a UTF-8 string."""
    return base64.b85decode(b85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
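    # Sanity check: Base85 encoding must round-trip back to the original input.
    assert decoded == test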
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
Wav2Vec2CTCTokenizer,
Wav2Vec2FeatureExtractor,
Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split("." ):
_UpperCamelCase : str = getattr(lowercase_ ,lowercase_ )
if weight_type is not None:
_UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ).shape
else:
_UpperCamelCase : int = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "weight_g":
_UpperCamelCase : int = value
elif weight_type == "weight_v":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "bias":
_UpperCamelCase : int = value
else:
_UpperCamelCase : Any = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[str] = []
_UpperCamelCase : Any = fairseq_model.state_dict()
_UpperCamelCase : Union[str, Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_UpperCamelCase : List[str] = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,hf_model.config.feat_extract_norm == "group" ,)
_UpperCamelCase : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
_UpperCamelCase : Dict = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_UpperCamelCase : Any = True
if "*" in mapped_key:
_UpperCamelCase : Dict = name.split(lowercase_ )[0].split("." )[-2]
_UpperCamelCase : Any = mapped_key.replace("*" ,lowercase_ )
if "weight_g" in name:
_UpperCamelCase : str = "weight_g"
elif "weight_v" in name:
_UpperCamelCase : Any = "weight_v"
elif "weight" in name:
_UpperCamelCase : List[str] = "weight"
elif "bias" in name:
_UpperCamelCase : List[Any] = "bias"
else:
_UpperCamelCase : str = None
set_recursively(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Any = full_name.split("conv_layers." )[-1]
_UpperCamelCase : Optional[Any] = name.split("." )
_UpperCamelCase : Union[str, Any] = int(items[0] )
_UpperCamelCase : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_UpperCamelCase : Union[str, Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_UpperCamelCase : Tuple = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_UpperCamelCase : List[str] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_UpperCamelCase : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : Dict = SEWConfig()
if is_finetuned:
_UpperCamelCase : Dict = model.wav_encoder.wav_model.cfg
else:
_UpperCamelCase : List[Any] = model.cfg
_UpperCamelCase : Any = fs_config.conv_bias
_UpperCamelCase : str = eval(fs_config.conv_feature_layers )
_UpperCamelCase : Any = [x[0] for x in conv_layers]
_UpperCamelCase : List[Any] = [x[1] for x in conv_layers]
_UpperCamelCase : Union[str, Any] = [x[2] for x in conv_layers]
_UpperCamelCase : str = "gelu"
_UpperCamelCase : List[str] = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
_UpperCamelCase : Optional[int] = 0.0
_UpperCamelCase : Dict = fs_config.activation_fn.name
_UpperCamelCase : Any = fs_config.encoder_embed_dim
_UpperCamelCase : Optional[Any] = 0.02
_UpperCamelCase : str = fs_config.encoder_ffn_embed_dim
_UpperCamelCase : int = 1e-5
_UpperCamelCase : Optional[int] = fs_config.encoder_layerdrop
_UpperCamelCase : str = fs_config.encoder_attention_heads
_UpperCamelCase : Tuple = fs_config.conv_pos_groups
_UpperCamelCase : List[str] = fs_config.conv_pos
_UpperCamelCase : Optional[int] = len(lowercase_ )
_UpperCamelCase : Union[str, Any] = fs_config.encoder_layers
_UpperCamelCase : Union[str, Any] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_UpperCamelCase : List[str] = model.cfg
_UpperCamelCase : List[str] = fs_config.final_dropout
_UpperCamelCase : Optional[Any] = fs_config.layerdrop
_UpperCamelCase : int = fs_config.activation_dropout
_UpperCamelCase : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_UpperCamelCase : int = fs_config.attention_dropout
_UpperCamelCase : int = fs_config.dropout_input
_UpperCamelCase : List[Any] = fs_config.dropout
_UpperCamelCase : List[Any] = fs_config.mask_channel_length
_UpperCamelCase : List[str] = fs_config.mask_channel_prob
_UpperCamelCase : Optional[Any] = fs_config.mask_length
_UpperCamelCase : Optional[int] = fs_config.mask_prob
_UpperCamelCase : List[str] = "Wav2Vec2FeatureExtractor"
_UpperCamelCase : Optional[Any] = "Wav2Vec2CTCTokenizer"
return config
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=True ) -> str:
"""simple docstring"""
if is_finetuned:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_UpperCamelCase : str = SEWConfig.from_pretrained(lowercase_ )
else:
_UpperCamelCase : Optional[int] = convert_config(model[0] ,lowercase_ )
_UpperCamelCase : List[str] = model[0].eval()
_UpperCamelCase : Union[str, Any] = True if config.feat_extract_norm == "layer" else False
_UpperCamelCase : Union[str, Any] = Wav2Vec2FeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=lowercase_ ,return_attention_mask=lowercase_ ,)
if is_finetuned:
if dict_path:
_UpperCamelCase : Union[str, Any] = Dictionary.load(lowercase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCamelCase : List[str] = target_dict.pad_index
_UpperCamelCase : Optional[int] = target_dict.bos_index
_UpperCamelCase : Any = target_dict.pad_index
_UpperCamelCase : List[Any] = target_dict.bos_index
_UpperCamelCase : List[str] = target_dict.eos_index
_UpperCamelCase : Optional[Any] = len(target_dict.symbols )
_UpperCamelCase : List[Any] = os.path.join(lowercase_ ,"vocab.json" )
if not os.path.isdir(lowercase_ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase_ ) )
return
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
with open(lowercase_ ,"w" ,encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices ,lowercase_ )
_UpperCamelCase : Optional[Any] = Wav2Vec2CTCTokenizer(
lowercase_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=lowercase_ ,)
_UpperCamelCase : List[str] = Wav2Vec2Processor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
processor.save_pretrained(lowercase_ )
_UpperCamelCase : List[Any] = SEWForCTC(lowercase_ )
else:
_UpperCamelCase : int = SEWModel(lowercase_ )
feature_extractor.save_pretrained(lowercase_ )
recursively_load_weights(lowercase_ ,lowercase_ ,lowercase_ )
hf_model.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowerCamelCase__ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
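# Example invocation using the flags defined above (script name and all paths are
# hypothetical placeholders):
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew_tiny_100k.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./sew-tiny-100k-hf \
#       --is_finetuned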
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = "vit_msn"
def __init__( self : List[Any] , __a : Union[str, Any]=768 , __a : Union[str, Any]=12 , __a : Any=12 , __a : int=3072 , __a : Optional[Any]="gelu" , __a : Union[str, Any]=0.0 , __a : int=0.0 , __a : Optional[Any]=0.02 , __a : Dict=1e-0_6 , __a : List[Any]=224 , __a : Optional[int]=16 , __a : int=3 , __a : str=True , **__a : Optional[Any] , ) -> Optional[Any]:
super().__init__(**__a )
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Dict = num_hidden_layers
_UpperCamelCase : Optional[int] = num_attention_heads
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : Optional[int] = hidden_act
_UpperCamelCase : Optional[int] = hidden_dropout_prob
_UpperCamelCase : List[Any] = attention_probs_dropout_prob
_UpperCamelCase : Any = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Any = image_size
_UpperCamelCase : str = patch_size
_UpperCamelCase : Any = num_channels
_UpperCamelCase : Any = qkv_bias
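# Hedged usage sketch (the export name ViTMSNConfig is assumed, not shown in this file):
#   config = ViTMSNConfig(image_size=384)
#   print(config.hidden_size)  # 768 by default, per the __init__ signature above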
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Return the value of the Möbius function for ``number``."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
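    # μ(1..10) = 1, -1, -1, 0, -1, 1, -1, 0, 0, 1
    # (assumes prime_factors(1) returns an empty list, so that μ(1) == 1)
    print([mobius(n) for n in range(1, 11)])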
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[int] = -1
_UpperCamelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Any = TextStreamer(__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Optional[int] = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Dict = -1
_UpperCamelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : List[str] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[int] = tokenizer.decode(greedy_ids[0] )
_UpperCamelCase : Tuple = TextIteratorStreamer(__a )
_UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : Optional[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
_UpperCamelCase : Tuple = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Union[str, Any] = -1
_UpperCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : str = greedy_ids[:, input_ids.shape[1] :]
_UpperCamelCase : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Optional[int] = TextStreamer(__a , skip_prompt=__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Tuple = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("distilgpt2" )
_UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a )
_UpperCamelCase : int = -1
_UpperCamelCase : Any = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCamelCase : List[str] = TextStreamer(__a , skip_special_tokens=__a )
model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCamelCase : int = cs.out[:-1] # Remove the final "\n"
_UpperCamelCase : int = tokenizer(__a , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[Any] = -1
_UpperCamelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Any = TextIteratorStreamer(__a , timeout=0.0_01 )
_UpperCamelCase : Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : List[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__a ):
_UpperCamelCase : List[str] = ""
for new_text in streamer:
streamer_text += new_text
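# Minimal usage sketch outside the test harness (model name "gpt2" is illustrative):
#
#   from threading import Thread
#   from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
#
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   streamer = TextIteratorStreamer(tok, skip_prompt=True)
#   inputs = tok("Once upon a time", return_tensors="pt")
#   Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer}).start()
#   for chunk in streamer:
#       print(chunk, end="", flush=True)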
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = GPT2Tokenizer
SCREAMING_SNAKE_CASE__ :Tuple = GPT2TokenizerFast
SCREAMING_SNAKE_CASE__ :Dict = True
SCREAMING_SNAKE_CASE__ :int = {"add_prefix_space": True}
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase : List[str] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_UpperCamelCase : Tuple = dict(zip(__a , range(len(__a ) ) ) )
_UpperCamelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCamelCase : str = {"unk_token": "<unk>"}
_UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__a ) )
def __SCREAMING_SNAKE_CASE ( self : Any , **__a : Optional[int] ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return GPT2Tokenizer.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Union[str, Any] ) -> int:
kwargs.update(self.special_tokens_map )
return GPT2TokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any ) -> Tuple:
_UpperCamelCase : List[Any] = "lower newer"
_UpperCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Dict = GPT2Tokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCamelCase : Optional[Any] = "lower newer"
_UpperCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_UpperCamelCase : Any = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
_UpperCamelCase : str = tokens + [tokenizer.unk_token]
_UpperCamelCase : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
if not self.test_rust_tokenizer:
return
_UpperCamelCase : Any = self.get_tokenizer()
_UpperCamelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = "lower newer"
# Testing tokenization
_UpperCamelCase : str = tokenizer.tokenize(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
_UpperCamelCase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
_UpperCamelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : List[Any] = tokenizer.encode(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
_UpperCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_UpperCamelCase : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : int , *__a : int , **__a : List[Any] ) -> Union[str, Any]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int=15 ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
_UpperCamelCase : Optional[int] = "This is a simple input"
_UpperCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Dict = ("This is a simple input", "This is a pair")
_UpperCamelCase : Any = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Dict = GPT2Tokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
_UpperCamelCase : Union[str, Any] = "This is a simple input"
_UpperCamelCase : Optional[Any] = ["This is a simple input looooooooong", "This is a simple input"]
_UpperCamelCase : str = ("This is a simple input", "This is a pair")
_UpperCamelCase : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_UpperCamelCase : Union[str, Any] = tokenizer.pad_token_id
_UpperCamelCase : str = tokenizer(__a , padding="max_length" , max_length=30 , return_tensors="np" )
_UpperCamelCase : Tuple = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
_UpperCamelCase : str = tokenizer(*__a , padding="max_length" , max_length=60 , return_tensors="np" )
_UpperCamelCase : Optional[int] = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
_UpperCamelCase : Any = "$$$"
_UpperCamelCase : Any = GPT2Tokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
_UpperCamelCase : int = "This is a simple input"
_UpperCamelCase : Tuple = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Union[str, Any] = tokenizer.bos_token_id
_UpperCamelCase : str = tokenizer(__a )
_UpperCamelCase : Optional[Any] = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_UpperCamelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_UpperCamelCase : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
pass
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
# TODO: change to self.get_tokenizers() when the fast version is implemented
_UpperCamelCase : Optional[Any] = [self.get_tokenizer(do_lower_case=__a , add_bos_token=__a )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : Tuple = "Encode this."
_UpperCamelCase : List[str] = "This one too please."
_UpperCamelCase : Optional[int] = tokenizer.encode(__a , add_special_tokens=__a )
encoded_sequence += tokenizer.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer.encode_plus(
__a , __a , add_special_tokens=__a , return_special_tokens_mask=__a , )
_UpperCamelCase : str = encoded_sequence_dict["input_ids"]
_UpperCamelCase : Optional[int] = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(__a ) , len(__a ) )
_UpperCamelCase : Union[str, Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__a )
]
_UpperCamelCase : Union[str, Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(__a , __a )
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Any = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("test_opt" )
_UpperCamelCase : str = AutoTokenizer.from_pretrained("./test_opt" )
_UpperCamelCase : Optional[Any] = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
_UpperCamelCase : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Union[str, Any] = tokenizer.encode(
__a , )
# Same as above
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[str] = "bos"
_UpperCamelCase : Tuple = tokenizer.get_vocab()["bos"]
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : List[Any] = tokenizer.encode(
__a , )
# We changed the bos token
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("./tok" )
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
_UpperCamelCase : Tuple = tokenizer.encode(
__a , )
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , __a : Any , __a : Optional[Any]=13 , __a : List[Any]=7 , __a : List[str]=True , __a : Tuple=True , __a : Optional[Any]=True , __a : Optional[Any]=True , __a : int=99 , __a : int=32 , __a : Dict=5 , __a : List[Any]=4 , __a : Tuple=37 , __a : List[str]="gelu" , __a : int=0.1 , __a : Optional[int]=0.1 , __a : str=512 , __a : Dict=16 , __a : Optional[int]=2 , __a : str=0.02 , __a : Any=4 , ) -> List[Any]:
_UpperCamelCase : Tuple = parent
_UpperCamelCase : int = batch_size
_UpperCamelCase : int = seq_length
_UpperCamelCase : Dict = is_training
_UpperCamelCase : Optional[Any] = use_attention_mask
_UpperCamelCase : Optional[int] = use_token_type_ids
_UpperCamelCase : Optional[int] = use_labels
_UpperCamelCase : Dict = vocab_size
_UpperCamelCase : int = hidden_size
_UpperCamelCase : Dict = num_hidden_layers
_UpperCamelCase : Tuple = num_attention_heads
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : str = hidden_act
_UpperCamelCase : Any = hidden_dropout_prob
_UpperCamelCase : Optional[Any] = attention_probs_dropout_prob
_UpperCamelCase : Optional[Any] = max_position_embeddings
_UpperCamelCase : List[Any] = type_vocab_size
_UpperCamelCase : Optional[Any] = type_sequence_label_size
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : Tuple = num_choices
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : int = None
if self.use_attention_mask:
_UpperCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase : Union[str, Any] = None
if self.use_token_type_ids:
_UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase : Tuple = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
_UpperCamelCase : Dict = self.prepare_config_and_inputs()
_UpperCamelCase : Optional[int] = config_and_inputs
_UpperCamelCase : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
_UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
_UpperCamelCase : Optional[int] = config_and_inputs
_UpperCamelCase : List[str] = True
_UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Dict = True
SCREAMING_SNAKE_CASE__ :List[Any] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
_UpperCamelCase : Any = FlaxRobertaModelTester(self )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
for model_class_name in self.all_model_classes:
_UpperCamelCase : Dict = model_class_name.from_pretrained("roberta-base" , from_pt=__a )
_UpperCamelCase : Optional[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__a )
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCamelCase__ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __SCREAMING_SNAKE_CASE ( unittest.TestCase , _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = load_tool("text-question-answering" )
self.tool.setup()
_UpperCamelCase : Union[str, Any] = load_tool("text-question-answering" , remote=__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Dict = self.tool(__a , "What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.remote_tool(__a , "What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Dict = self.tool(text=__a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : List[Any] = self.remote_tool(text=__a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
"""simple docstring"""
from typing import Any
class Node:
    '''simple docstring'''

    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    '''simple docstring'''

    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """simple docstring"""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """simple docstring"""
    test_input = [
        -9,
        100,
        Node(77_345_112),
        "dlrow olleH",
        7,
        5_555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """simple docstring"""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
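

# A short usage sketch (not part of the original module), assuming the
# `LinkedList` class above: indexing, item assignment and O(n) in-place reversal.
def demo_linked_list() -> None:
    lst = LinkedList()
    for value in (1, 2, 3):
        lst.insert_tail(value)
    lst[1] = 20    # __setitem__ walks to the node and swaps its data
    lst.reverse()  # iterative three-pointer reversal
    print(lst)     # 3->20->1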
| 366
|
"""simple docstring"""
lowerCamelCase__ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent) -> bool:
    """simple docstring"""
    # Return True if there is an augmenting path from s to t in the residual graph.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def mincut(graph, source, sink) -> list:
    """simple docstring"""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
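

# A small companion check (not part of the original module), assuming `mincut`
# and `test_graph` above: by the max-flow min-cut theorem, the maximum flow
# value equals the total capacity crossing the minimum cut (23 for test_graph).
# `mincut` mutates the graph it is given, so this sketch works on copies.
def min_cut_capacity(graph, source, sink):
    original = [row[:] for row in graph]  # capacities before any mutation
    cut_edges = mincut([row[:] for row in graph], source, sink)
    return sum(original[u][v] for u, v in cut_edges)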
| 310
| 0
|
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
'''simple docstring'''
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
"n_token": "vocab_size", # Backward compatibility
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self) -> int:
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
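

# Usage sketch (not part of the original file), assuming this class mirrors
# transformers' XLNetConfig: `attribute_map` above lets generic attribute names
# resolve to the XLNet-specific ones.
config = XLNetConfig(d_model=768, n_layer=12, n_head=12)
assert config.hidden_size == config.d_model == 768  # via attribute_map
assert config.d_head == 768 // 12                   # derived in __init__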
| 367
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __SCREAMING_SNAKE_CASE ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray:
_UpperCamelCase : Any = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" in size:
_UpperCamelCase : str = get_resize_output_image_size(__a , size["shortest_edge"] , default_to_square=__a )
elif "height" in size and "width" in size:
_UpperCamelCase : Any = (size["height"], size["width"])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] , ) -> np.ndarray:
_UpperCamelCase : List[Any] = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Union[int, float] , __a : bool = True , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> Optional[Any]:
_UpperCamelCase : Any = image.astype(np.floataa )
if offset:
_UpperCamelCase : Dict = image - (scale / 2)
return rescale(__a , scale=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ) -> np.ndarray:
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
_UpperCamelCase : Optional[Any] = to_numpy_array(__a )
if do_resize:
_UpperCamelCase : Any = self.resize(image=__a , size=__a , resample=__a )
if do_center_crop:
_UpperCamelCase : Dict = self.center_crop(__a , size=__a )
if do_rescale:
_UpperCamelCase : Union[str, Any] = self.rescale(image=__a , scale=__a , offset=__a )
if do_normalize:
_UpperCamelCase : int = self.normalize(image=__a , mean=__a , std=__a )
_UpperCamelCase : str = to_channel_dimension_format(__a , __a )
return image
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image:
_UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase : Optional[int] = resample if resample is not None else self.resample
_UpperCamelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase : str = offset if offset is not None else self.offset
_UpperCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase : Tuple = image_std if image_std is not None else self.image_std
_UpperCamelCase : int = size if size is not None else self.size
_UpperCamelCase : Tuple = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase : Optional[int] = get_size_dict(__a , param_name="crop_size" )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
_UpperCamelCase : Union[str, Any] = make_batched(__a )
_UpperCamelCase : Optional[Any] = [
[
self._preprocess_image(
image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , )
for img in video
]
for video in videos
]
_UpperCamelCase : List[Any] = {"pixel_values": videos}
return BatchFeature(data=__a , tensor_type=__a )
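

# Illustration sketch (not part of the original file): `make_batched` above
# normalises a single frame, a single video, or a batch of videos into the
# List[List[frame]] layout the processor works on.
_frame = np.zeros((32, 32, 3), dtype=np.uint8)
assert len(make_batched(_frame)) == 1                 # one video with one frame
assert len(make_batched([_frame, _frame])) == 1       # one video with two frames
assert len(make_batched([[_frame], [_frame]])) == 2   # a batch of two videos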
| 310
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_UpperCamelCase )
class RagConfig(PretrainedConfig):
'''simple docstring'''
    model_type = "rag"
    is_composition = True
    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
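

# Usage sketch (not part of the original file), assuming this class mirrors
# transformers' RagConfig: a RAG configuration is composed from a
# question-encoder config and a generator config via the classmethod above.
# The model identifiers are illustrative.
from transformers import AutoConfig

rag_config = RagConfig.from_question_encoder_generator_configs(
    AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base"),
    AutoConfig.from_pretrained("facebook/bart-large"),
    n_docs=5,
)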
| 368
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
lowerCamelCase__ = True
except ImportError:
lowerCamelCase__ = False
try:
from torch.hub import _get_torch_home
lowerCamelCase__ = _get_torch_home()
except ImportError:
lowerCamelCase__ = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
lowerCamelCase__ = os.path.join(torch_cache_home, "transformers")
lowerCamelCase__ = "https://cdn.huggingface.co"
lowerCamelCase__ = "https://s3.amazonaws.com/models.huggingface.co/bert"
lowerCamelCase__ = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
lowerCamelCase__ = os.path.join(PATH, "config.yaml")
lowerCamelCase__ = os.path.join(PATH, "attributes.txt")
lowerCamelCase__ = os.path.join(PATH, "objects.txt")
lowerCamelCase__ = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
lowerCamelCase__ = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
lowerCamelCase__ = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
lowerCamelCase__ = "pytorch_model.bin"
lowerCamelCase__ = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """simple docstring"""
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    """simple docstring"""
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = {}
def __init__( self : str , __a : dict , __a : str = "root" , __a : Any=0 ) -> Any:
_UpperCamelCase : Optional[Any] = name
_UpperCamelCase : Optional[Any] = level
_UpperCamelCase : Union[str, Any] = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
_UpperCamelCase : Optional[int] = copy.deepcopy(__a )
_UpperCamelCase : Dict = copy.deepcopy(__a )
if isinstance(__a , __a ):
_UpperCamelCase : Union[str, Any] = Config(__a , name=__a , level=level + 1 )
_UpperCamelCase : Optional[Any] = v
setattr(self , __a , __a )
_UpperCamelCase : Optional[Any] = d
def __repr__( self : List[str] ) -> List[Any]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Dict , __a : Union[str, Any] , __a : Optional[int] ) -> int:
_UpperCamelCase : Any = val
_UpperCamelCase : Optional[Any] = val
_UpperCamelCase : Dict = key.split("." )
_UpperCamelCase : int = len(__a ) - 1
_UpperCamelCase : List[str] = self._pointer
if len(__a ) > 1:
for i, l in enumerate(__a ):
if hasattr(self , __a ) and isinstance(getattr(self , __a ) , __a ):
setattr(getattr(self , __a ) , ".".join(levels[i:] ) , __a )
if l == last_level:
_UpperCamelCase : str = val
else:
_UpperCamelCase : List[str] = pointer[l]
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._pointer
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Tuple , __a : List[str] ) -> Dict:
with open(F'''{file_name}''' , "w" ) as stream:
dump(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int , __a : List[Any] , __a : Dict ) -> List[Any]:
with open(F'''{file_name}''' , "w" ) as stream:
json.dump(__a , __a )
@staticmethod
def __SCREAMING_SNAKE_CASE ( __a : Union[str, Any] ) -> Optional[int]:
with open(__a ) as stream:
_UpperCamelCase : int = load(__a , Loader=__a )
return data
def __str__( self : List[str] ) -> Tuple:
_UpperCamelCase : List[str] = " "
if self._name != "root":
_UpperCamelCase : Dict = F'''{t * (self._level-1)}{self._name}:\n'''
else:
_UpperCamelCase : Any = ""
_UpperCamelCase : Any = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__a , __a ):
r += F'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += F'''{t * (self._level)}{k}: {v} ({type(__a ).__name__})\n'''
_UpperCamelCase : Optional[Any] = level
return r[:-1]
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Dict , __a : str , **__a : str ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : int = cls.get_config_dict(__a , **__a )
return cls(__a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , __a : str , **__a : Union[str, Any] ) -> Tuple:
_UpperCamelCase : Tuple = kwargs.pop("cache_dir" , __a )
_UpperCamelCase : Optional[int] = kwargs.pop("force_download" , __a )
_UpperCamelCase : str = kwargs.pop("resume_download" , __a )
_UpperCamelCase : Any = kwargs.pop("proxies" , __a )
_UpperCamelCase : List[Any] = kwargs.pop("local_files_only" , __a )
if os.path.isdir(__a ):
_UpperCamelCase : Optional[Any] = os.path.join(__a , __a )
elif os.path.isfile(__a ) or is_remote_url(__a ):
_UpperCamelCase : Optional[int] = pretrained_model_name_or_path
else:
_UpperCamelCase : int = hf_bucket_url(__a , filename=__a , use_cdn=__a )
try:
# Load from URL or cache if already cached
_UpperCamelCase : Optional[int] = cached_path(
__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_UpperCamelCase : List[Any] = Config.load_yaml(__a )
except EnvironmentError:
_UpperCamelCase : Union[str, Any] = "Can't load config for"
raise EnvironmentError(__a )
if resolved_config_file == config_file:
print("loading configuration file from path" )
else:
print("loading configuration file cache" )
return Config.load_yaml(__a ), kwargs
def compare(in_tensor):
    """simple docstring"""
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n2.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    """simple docstring"""
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True):
    """simple docstring"""
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    """simple docstring"""
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1_024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=10 ,lowercase_=False ,lowercase_=None ,lowercase_=False ,) -> Tuple:
"""simple docstring"""
if cache_dir is None:
_UpperCamelCase : str = TRANSFORMERS_CACHE
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : Dict = str(lowercase_ )
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
_UpperCamelCase : Dict = None
if not local_files_only:
try:
_UpperCamelCase : List[Any] = requests.head(lowercase_ ,allow_redirects=lowercase_ ,proxies=lowercase_ ,timeout=lowercase_ )
if response.status_code == 200:
_UpperCamelCase : str = response.headers.get("ETag" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_UpperCamelCase : int = url_to_filename(lowercase_ ,lowercase_ )
# get cache path to put the file
_UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowercase_ ):
return cache_path
else:
_UpperCamelCase : Optional[int] = [
file
for file in fnmatch.filter(os.listdir(lowercase_ ) ,filename + ".*" )
if not file.endswith(".json" ) and not file.endswith(".lock" )
]
if len(lowercase_ ) > 0:
return os.path.join(lowercase_ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False." )
return None
# From now on, etag is not None.
if os.path.exists(lowercase_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_UpperCamelCase : Dict = cache_path + ".lock"
with FileLock(lowercase_ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowercase_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_UpperCamelCase : List[str] = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(lowercase_ ,"a+b" ) as f:
yield f
_UpperCamelCase : Union[str, Any] = _resumable_file_manager
if os.path.exists(lowercase_ ):
_UpperCamelCase : str = os.stat(lowercase_ ).st_size
else:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Tuple = partial(tempfile.NamedTemporaryFile ,dir=lowercase_ ,delete=lowercase_ )
_UpperCamelCase : Optional[Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"%s not found in cache or force_download set to True, downloading to %s" ,lowercase_ ,temp_file.name ,)
http_get(
lowercase_ ,lowercase_ ,proxies=lowercase_ ,resume_size=lowercase_ ,user_agent=lowercase_ ,)
os.replace(temp_file.name ,lowercase_ )
_UpperCamelCase : Optional[int] = {"url": url, "etag": etag}
_UpperCamelCase : List[str] = cache_path + ".json"
with open(lowercase_ ,"w" ) as meta_file:
json.dump(lowercase_ ,lowercase_ )
return cache_path
def url_to_filename(url, etag=None):
    """simple docstring"""
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
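

# Quick illustration (not part of the original file): cache filenames are the
# sha256 hex digest of the URL, optionally suffixed with a hash of the ETag.
print(url_to_filename("https://example.com/model.bin"))         # 64 hex chars
print(url_to_filename("https://example.com/model.bin", "abc"))  # url-hash.etag-hash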
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=False ,lowercase_=False ,) -> str:
"""simple docstring"""
if cache_dir is None:
_UpperCamelCase : List[Any] = TRANSFORMERS_CACHE
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : str = str(lowercase_ )
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : str = str(lowercase_ )
if is_remote_url(lowercase_ ):
# URL, so get it from the cache (downloading if necessary)
_UpperCamelCase : Union[str, Any] = get_from_cache(
lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,proxies=lowercase_ ,resume_download=lowercase_ ,user_agent=lowercase_ ,local_files_only=lowercase_ ,)
elif os.path.exists(lowercase_ ):
# File, and it exists.
_UpperCamelCase : List[str] = url_or_filename
elif urlparse(lowercase_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(lowercase_ ) )
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(lowercase_ ) )
if extract_compressed_file:
if not is_zipfile(lowercase_ ) and not tarfile.is_tarfile(lowercase_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_UpperCamelCase, _UpperCamelCase : Any = os.path.split(lowercase_ )
_UpperCamelCase : Optional[int] = output_file.replace("." ,"-" ) + "-extracted"
_UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ )
if os.path.isdir(lowercase_ ) and os.listdir(lowercase_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_UpperCamelCase : Optional[int] = output_path + ".lock"
with FileLock(lowercase_ ):
shutil.rmtree(lowercase_ ,ignore_errors=lowercase_ )
os.makedirs(lowercase_ )
if is_zipfile(lowercase_ ):
with ZipFile(lowercase_ ,"r" ) as zip_file:
zip_file.extractall(lowercase_ )
zip_file.close()
elif tarfile.is_tarfile(lowercase_ ):
_UpperCamelCase : int = tarfile.open(lowercase_ )
tar_file.extractall(lowercase_ )
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(lowercase_ ) )
return output_path_extracted
return output_path
def get_data(query, delim=","):
    """simple docstring"""
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    """simple docstring"""
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    """simple docstring"""
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    """simple docstring"""
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def img_tensorize(im, input_format="RGB"):
    """simple docstring"""
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    """simple docstring"""
    return (images[i : i + batch] for i in range(0, len(images), batch))
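

# Quick sketch (not part of the original file), assuming the final helper above
# is the project's `chunk`: it lazily yields fixed-size batches from a sequence.
assert list(chunk(list(range(5)), batch=2)) == [[0, 1], [2, 3], [4]]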
| 310
| 0
|
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """simple docstring"""
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1_000_000) -> int:
    """simple docstring"""
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
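

# Worked example (not part of the original solution): 585 is counted because it
# reads the same both ways in base 10 and in base 2: bin(585) -> "1001001001".
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])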
| 369
|
"""simple docstring"""
import torch
from transformers import AutoModel
class __SCREAMING_SNAKE_CASE ( torch.nn.Module ):
'''simple docstring'''
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))
    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
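

# A tiny standalone sketch (not part of the original module) of the attention
# scoring used above: cosine similarity between query and support token
# embeddings, scaled by a temperature T and normalised with softmax. The
# shapes below are illustrative only.
q_rep = torch.randn(1, 5, 1, 64)  # (batch, query_tokens, 1, hidden)
S_rep = torch.randn(1, 1, 7, 64)  # (batch, 1, support_tokens, hidden)
atten = torch.nn.Softmax(dim=1)(1 * torch.nn.CosineSimilarity(3, 1e-08)(q_rep, S_rep))
print(atten.shape)  # torch.Size([1, 5, 7])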
| 310
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
    model_input_names = ["pixel_values"]
def __init__( self : List[str] , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : List[Any] , ) -> None:
super().__init__(**__a )
_UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 256}
_UpperCamelCase : List[Any] = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : int = crop_size if crop_size is not None else {"height": 224, "width": 224}
_UpperCamelCase : Optional[Any] = get_size_dict(__a , param_name="crop_size" )
_UpperCamelCase : str = do_resize
_UpperCamelCase : Dict = size
_UpperCamelCase : int = do_center_crop
_UpperCamelCase : int = crop_size
_UpperCamelCase : Optional[Any] = resample
_UpperCamelCase : Dict = do_rescale
_UpperCamelCase : Any = rescale_factor
_UpperCamelCase : Any = offset
_UpperCamelCase : Union[str, Any] = do_normalize
_UpperCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __SCREAMING_SNAKE_CASE ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray:
_UpperCamelCase : Any = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" in size:
_UpperCamelCase : str = get_resize_output_image_size(__a , size["shortest_edge"] , default_to_square=__a )
elif "height" in size and "width" in size:
_UpperCamelCase : Any = (size["height"], size["width"])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] , ) -> np.ndarray:
_UpperCamelCase : List[Any] = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Union[int, float] , __a : bool = True , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> Optional[Any]:
_UpperCamelCase : Any = image.astype(np.floataa )
if offset:
_UpperCamelCase : Dict = image - (scale / 2)
return rescale(__a , scale=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ) -> np.ndarray:
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
_UpperCamelCase : Optional[Any] = to_numpy_array(__a )
if do_resize:
_UpperCamelCase : Any = self.resize(image=__a , size=__a , resample=__a )
if do_center_crop:
_UpperCamelCase : Dict = self.center_crop(__a , size=__a )
if do_rescale:
_UpperCamelCase : Union[str, Any] = self.rescale(image=__a , scale=__a , offset=__a )
if do_normalize:
_UpperCamelCase : int = self.normalize(image=__a , mean=__a , std=__a )
_UpperCamelCase : str = to_channel_dimension_format(__a , __a )
return image
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image:
_UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase : Optional[int] = resample if resample is not None else self.resample
_UpperCamelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase : str = offset if offset is not None else self.offset
_UpperCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase : Tuple = image_std if image_std is not None else self.image_std
_UpperCamelCase : int = size if size is not None else self.size
_UpperCamelCase : Tuple = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase : Optional[int] = get_size_dict(__a , param_name="crop_size" )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
_UpperCamelCase : Union[str, Any] = make_batched(__a )
_UpperCamelCase : Optional[Any] = [
[
self._preprocess_image(
image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , )
for img in video
]
for video in videos
]
_UpperCamelCase : List[Any] = {"pixel_values": videos}
return BatchFeature(data=__a , tensor_type=__a )
| 370
|
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list[Any]:
    """simple docstring"""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
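

# Quick example (not part of the original module): all tied values are
# returned, in sorted order.
assert mode([1, 2, 2, 3, 3]) == [2, 3]
assert mode([5, 1, 5]) == [5]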
| 310
| 0
|
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """simple docstring"""
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    '''simple docstring'''

    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))
    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    '''simple docstring'''

    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = "rag"
SCREAMING_SNAKE_CASE__ :List[str] = True
def __init__( self : List[Any] , __a : Optional[Any]=None , __a : str=True , __a : Tuple=None , __a : Dict=None , __a : Optional[int]=None , __a : Optional[int]=None , __a : List[Any]=None , __a : Dict=" / " , __a : int=" // " , __a : Optional[Any]=5 , __a : Dict=300 , __a : Optional[int]=768 , __a : Tuple=8 , __a : Union[str, Any]="wiki_dpr" , __a : Dict="train" , __a : List[Any]="compressed" , __a : str=None , __a : Tuple=None , __a : int=False , __a : str=False , __a : Optional[int]=0.0 , __a : Dict=True , __a : Tuple=False , __a : Dict=False , __a : str=False , __a : str=True , __a : Optional[Any]=None , **__a : Tuple , ) -> Any:
super().__init__(
bos_token_id=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , is_encoder_decoder=__a , prefix=__a , vocab_size=__a , **__a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_UpperCamelCase : Optional[int] = kwargs.pop("question_encoder" )
_UpperCamelCase : str = question_encoder_config.pop("model_type" )
_UpperCamelCase : Tuple = kwargs.pop("generator" )
_UpperCamelCase : str = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_UpperCamelCase : Union[str, Any] = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : str = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : Optional[int] = reduce_loss
_UpperCamelCase : str = label_smoothing
_UpperCamelCase : int = exclude_bos_score
_UpperCamelCase : List[str] = do_marginalize
_UpperCamelCase : Optional[int] = title_sep
_UpperCamelCase : Optional[int] = doc_sep
_UpperCamelCase : Union[str, Any] = n_docs
_UpperCamelCase : Tuple = max_combined_length
_UpperCamelCase : Union[str, Any] = dataset
_UpperCamelCase : Any = dataset_split
_UpperCamelCase : List[str] = index_name
_UpperCamelCase : int = retrieval_vector_size
_UpperCamelCase : str = retrieval_batch_size
_UpperCamelCase : Dict = passages_path
_UpperCamelCase : str = index_path
_UpperCamelCase : Tuple = use_dummy_dataset
_UpperCamelCase : Union[str, Any] = output_retrieved
_UpperCamelCase : Optional[Any] = do_deduplication
_UpperCamelCase : str = use_cache
if self.forced_eos_token_id is None:
_UpperCamelCase : List[str] = getattr(self.generator , "forced_eos_token_id" , __a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Optional[int] ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
_UpperCamelCase : Dict = copy.deepcopy(self.__dict__ )
_UpperCamelCase : List[Any] = self.question_encoder.to_dict()
_UpperCamelCase : Tuple = self.generator.to_dict()
_UpperCamelCase : Any = self.__class__.model_type
return output
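# --- Usage sketch (added by the editor; not part of the original file) ---
# Assuming this is transformers' RagConfig (real names assumed, hedged), the
# classmethod above composes a RAG config from two sub-configs:
def _rag_config_demo() -> str:
    from transformers import BartConfig, BertConfig, RagConfig
    rag_cfg = RagConfig.from_question_encoder_generator_configs(
        BertConfig(), BartConfig(), n_docs=5, index_name="exact"
    )
    return rag_cfg.question_encoder.model_type  # "bert"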
'''simple docstring'''
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
lowercase : str = get_logger(__name__)
lowercase : List[Any] = R'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class A :
@add_start_docstrings(SCREAMING_SNAKE_CASE )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> jnp.ndarray:
"""simple docstring"""
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class A :
@add_start_docstrings(SCREAMING_SNAKE_CASE )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> jnp.ndarray:
"""simple docstring"""
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class A ( __snake_case ):
@add_start_docstrings(SCREAMING_SNAKE_CASE )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> jnp.ndarray:
"""simple docstring"""
for processor in self:
A : List[Any] = inspect.signature(processor.__call__ ).parameters
if len(SCREAMING_SNAKE_CASE ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'Make sure that all the required parameters: {list(function_args.keys() )} for '
F'{processor.__class__} are passed to the logits processor.' )
A : List[str] = processor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
else:
A : Union[str, Any] = processor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return scores
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not (temperature > 0):
raise ValueError(F'`temperature` has to be a strictly positive float, but is {temperature}' )
A : Any = temperature
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> jnp.ndarray:
"""simple docstring"""
A : Union[str, Any] = scores / self.temperature
return scores
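# --- Worked example (added by the editor; not part of the original file) ---
# Temperature warping only divides the logits: T < 1 sharpens the softmax
# distribution, T > 1 flattens it. A standalone numeric sketch:
def _temperature_demo():
    logits = jnp.array([[2.0, 1.0, 0.0]])
    sharp = jax.nn.softmax(logits / 0.5, axis=-1)  # low temperature -> peaky
    flat = jax.nn.softmax(logits / 2.0, axis=-1)   # high temperature -> flatter
    return sharp, flat  # the argmax is unchanged, only the confidence shifts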
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -float('''Inf''' ) , SCREAMING_SNAKE_CASE = 1 ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'`top_p` has to be a float > 0 and < 1, but is {top_p}' )
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or (min_tokens_to_keep < 1):
raise ValueError(F'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}' )
A : Tuple = top_p
A : Tuple = filter_value
A : str = min_tokens_to_keep
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> jnp.ndarray:
"""simple docstring"""
A, A : List[Any] = lax.top_k(SCREAMING_SNAKE_CASE , scores.shape[-1] )
A : Dict = jnp.full_like(SCREAMING_SNAKE_CASE , self.filter_value )
A : Optional[int] = jax.nn.softmax(SCREAMING_SNAKE_CASE , axis=-1 ).cumsum(axis=-1 )
A : Optional[int] = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
A : Tuple = jnp.roll(SCREAMING_SNAKE_CASE , 1 )
score_mask |= score_mask.at[:, 0].set(SCREAMING_SNAKE_CASE )
# min tokens to keep
A : Optional[Any] = score_mask.at[:, : self.min_tokens_to_keep].set(SCREAMING_SNAKE_CASE )
A : str = jnp.where(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : int = jax.lax.sort_key_val(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[-1]
return next_scores
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -float('''Inf''' ) , SCREAMING_SNAKE_CASE = 1 ) -> Any:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or top_k <= 0:
raise ValueError(F'`top_k` has to be a strictly positive integer, but is {top_k}' )
A : Any = max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Dict = filter_value
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> jnp.ndarray:
"""simple docstring"""
A, A : Optional[int] = scores.shape
A : Union[str, Any] = jnp.full(batch_size * vocab_size , self.filter_value )
A : str = min(self.top_k , scores.shape[-1] ) # Safety check
A, A : Any = lax.top_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Optional[Any] = jnp.broadcast_to((jnp.arange(SCREAMING_SNAKE_CASE ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
A : Any = topk_scores.flatten()
A : Tuple = topk_indices.flatten() + shift
A : str = next_scores_flat.at[topk_indices_flat].set(SCREAMING_SNAKE_CASE )
A : str = next_scores_flat.reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return next_scores
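# --- Worked example (added by the editor; not part of the original file) ---
# Top-k filtering keeps the k largest logits per row and pushes everything else
# to the filter value, so sampling can only pick among those k tokens:
def _top_k_demo() -> jnp.ndarray:
    scores = jnp.array([[0.1, 3.0, 2.0, -1.0]])
    top_vals, top_idx = lax.top_k(scores, 2)       # keeps 3.0 and 2.0
    filtered = jnp.full_like(scores, -jnp.inf)
    filtered = filtered.at[0, top_idx[0]].set(top_vals[0])
    return filtered  # [[-inf, 3.0, 2.0, -inf]]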
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
A : Any = bos_token_id
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> jnp.ndarray:
"""simple docstring"""
A : Optional[int] = jnp.full(scores.shape , -float('''inf''' ) )
A : Dict = 1 - jnp.bool_(cur_len - 1 )
A : List[Any] = jnp.where(SCREAMING_SNAKE_CASE , new_scores.at[:, self.bos_token_id].set(0 ) , SCREAMING_SNAKE_CASE )
return scores
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
A : List[str] = max_length
A : Optional[Any] = eos_token_id
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> jnp.ndarray:
"""simple docstring"""
A : List[Any] = jnp.full(scores.shape , -float('''inf''' ) )
A : Any = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A : Dict = jnp.where(SCREAMING_SNAKE_CASE , new_scores.at[:, self.eos_token_id].set(0 ) , SCREAMING_SNAKE_CASE )
return scores
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or min_length < 0:
raise ValueError(F'`min_length` has to be a positive integer, but is {min_length}' )
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or eos_token_id < 0:
raise ValueError(F'`eos_token_id` has to be a positive integer, but is {eos_token_id}' )
A : List[Any] = min_length
A : List[Any] = eos_token_id
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> jnp.ndarray:
"""simple docstring"""
A : Union[str, Any] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
A : Union[str, Any] = jnp.where(SCREAMING_SNAKE_CASE , scores.at[:, self.eos_token_id].set(-float('''inf''' ) ) , SCREAMING_SNAKE_CASE )
return scores
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
A : Tuple = list(SCREAMING_SNAKE_CASE )
A : Dict = begin_index
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
A : List[str] = 1 - jnp.bool_(cur_len - self.begin_index )
A : List[str] = jnp.where(SCREAMING_SNAKE_CASE , scores.at[:, self.begin_suppress_tokens].set(-float('''inf''' ) ) , SCREAMING_SNAKE_CASE )
return scores
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
A : Tuple = list(SCREAMING_SNAKE_CASE )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> jnp.ndarray:
"""simple docstring"""
A : Union[str, Any] = scores.at[..., self.suppress_tokens].set(-float('''inf''' ) )
return scores
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : Optional[Any] = dict(SCREAMING_SNAKE_CASE )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
A : int = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
A : List[Any] = force_token_array.at[index].set(SCREAMING_SNAKE_CASE )
A : List[str] = jnp.intaa(SCREAMING_SNAKE_CASE )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> jnp.ndarray:
"""simple docstring"""
def _force_token(SCREAMING_SNAKE_CASE ):
A : List[str] = scores.shape[0]
A : int = self.force_token_array[generation_idx]
A : Union[str, Any] = jnp.ones_like(SCREAMING_SNAKE_CASE , dtype=scores.dtype ) * -float('''inf''' )
A : Optional[int] = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
A : List[str] = lax.dynamic_update_slice(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , (0, current_token) )
return new_scores
A : Dict = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(SCREAMING_SNAKE_CASE ) , lambda: scores , ) , )
return scores
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : int = generate_config.eos_token_id
A : Union[str, Any] = generate_config.no_timestamps_token_id
A : Any = generate_config.no_timestamps_token_id + 1
A : Tuple = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(SCREAMING_SNAKE_CASE , '''max_initial_timestamp_index''' ):
A : List[str] = generate_config.max_initial_timestamp_index
else:
A : int = model_config.vocab_size
if self.max_initial_timestamp_index is None:
A : int = model_config.vocab_size
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
A : List[str] = scores.at[:, self.no_timestamps_token_id].set(-float('''inf''' ) )
def handle_pairs(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : Union[str, Any] = jnp.where((cur_len - self.begin_index) >= 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Optional[Any] = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , SCREAMING_SNAKE_CASE , )
A : Optional[int] = jnp.where((cur_len - self.begin_index) < 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Any = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
return jnp.where(
SCREAMING_SNAKE_CASE , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('''inf''' ) ) , scores_k.at[: self.eos_token_id].set(-float('''inf''' ) ) , ) , SCREAMING_SNAKE_CASE , )
A : Tuple = jax.vmap(SCREAMING_SNAKE_CASE )(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Any = jnp.where(cur_len == self.begin_index , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Dict = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , SCREAMING_SNAKE_CASE , )
A : Dict = self.timestamp_begin + self.max_initial_timestamp_index
A : Optional[int] = jnp.where(
SCREAMING_SNAKE_CASE , scores.at[:, last_allowed + 1 :].set(-float('''inf''' ) ) , SCREAMING_SNAKE_CASE , )
# if sum of probability over timestamps is above any other token, sample timestamp
A : List[Any] = jax.nn.log_softmax(SCREAMING_SNAKE_CASE , axis=-1 )
def handle_cumulative_probs(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : Optional[Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
A : int = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('''inf''' ) ) , SCREAMING_SNAKE_CASE , )
A : List[Any] = jax.vmap(SCREAMING_SNAKE_CASE )(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return scores
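# --- Usage sketch (added by the editor; not part of the original file) ---
# The processor-list class near the top of this file applies each processor in
# order. Assuming these are transformers' Flax processors (real names assumed):
def _processor_chain_demo() -> jnp.ndarray:
    from transformers import (
        FlaxLogitsProcessorList,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
    )
    chain = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=5)]
    )
    input_ids = jnp.ones((1, 4), dtype=jnp.int32)
    scores = jnp.zeros((1, 10))
    return chain(input_ids, scores, cur_len=4)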
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
lowercase : str = datasets.utils.logging.get_logger(__name__)
lowercase : Union[str, Any] = ['names', 'prefix']
lowercase : Union[str, Any] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
lowercase : List[Any] = ['encoding_errors', 'on_bad_lines']
lowercase : Any = ['date_format']
@dataclass
class A ( datasets.BuilderConfig ):
__magic_name__ = ","
__magic_name__ = None
__magic_name__ = "infer"
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
__magic_name__ = True
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
__magic_name__ = True
__magic_name__ = True
__magic_name__ = False
__magic_name__ = True
__magic_name__ = None
__magic_name__ = "."
__magic_name__ = None
__magic_name__ = '"'
__magic_name__ = 0
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
__magic_name__ = True
__magic_name__ = True
__magic_name__ = 0
__magic_name__ = True
__magic_name__ = False
__magic_name__ = None
__magic_name__ = 10000
__magic_name__ = None
__magic_name__ = "strict"
__magic_name__ = "error"
__magic_name__ = None
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
        pd_read_csv_kwargs = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
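# --- Usage sketch (added by the editor; not part of the original file) ---
# This config backs `load_dataset("csv", ...)`: keyword arguments like the ones
# below are forwarded to pandas.read_csv through the property above (real
# datasets API assumed):
def _csv_load_demo(path: str):
    from datasets import load_dataset
    return load_dataset("csv", data_files=path, sep=",", skiprows=0)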
class A ( datasets.ArrowBasedBuilder ):
__magic_name__ = CsvConfig
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
A : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(SCREAMING_SNAKE_CASE , (str, list, tuple) ):
A : str = data_files
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : int = [files]
A : Optional[int] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
A : Tuple = []
for split_name, files in data_files.items():
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : List[str] = [files]
A : List[str] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files]
splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE , gen_kwargs={'''files''': files} ) )
return splits
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> pa.Table:
"""simple docstring"""
if self.config.features is not None:
A : Optional[int] = self.config.features.arrow_schema
if all(not require_storage_cast(SCREAMING_SNAKE_CASE ) for feature in self.config.features.values() ):
# cheaper cast
A : List[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=SCREAMING_SNAKE_CASE )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
A : int = table_cast(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return pa_table
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
A : Union[str, Any] = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
A : int = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(SCREAMING_SNAKE_CASE ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE ) ):
A : Union[str, Any] = pd.read_csv(SCREAMING_SNAKE_CASE , iterator=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(SCREAMING_SNAKE_CASE ):
A : Dict = pa.Table.from_pandas(SCREAMING_SNAKE_CASE )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(SCREAMING_SNAKE_CASE )
except ValueError as e:
logger.error(F'Failed to read file \'{file}\' with error {type(SCREAMING_SNAKE_CASE )}: {e}' )
raise
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    '''simple docstring'''
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(train_data, train_target, classes, point, k=5):
    '''simple docstring'''
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
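# --- Cross-check sketch (added by the editor; not part of the original file) ---
# The same prediction with scikit-learn's built-in KNN, for comparison:
def _sklearn_knn_demo() -> str:
    from sklearn.neighbors import KNeighborsClassifier
    knn = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)
    return classes[knn.predict([[4.4, 3.1, 1.3, 1.4]])[0]]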
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : int = logging.get_logger(__name__)
lowercase : int = {
'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class A ( __snake_case ):
__magic_name__ = '''sew'''
def __init__( self , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE="group" , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , SCREAMING_SNAKE_CASE=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.05 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="mean" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=2 , **SCREAMING_SNAKE_CASE , ) -> Tuple:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE , pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE )
A : Optional[Any] = hidden_size
A : Any = feat_extract_norm
A : Optional[int] = feat_extract_activation
A : Tuple = list(SCREAMING_SNAKE_CASE )
A : List[str] = list(SCREAMING_SNAKE_CASE )
A : List[str] = list(SCREAMING_SNAKE_CASE )
A : int = conv_bias
A : List[Any] = num_conv_pos_embeddings
A : Tuple = num_conv_pos_embedding_groups
A : int = len(self.conv_dim )
A : Dict = num_hidden_layers
A : Optional[int] = intermediate_size
A : Any = squeeze_factor
A : int = hidden_act
A : str = num_attention_heads
A : Dict = hidden_dropout
A : Optional[Any] = attention_dropout
A : List[str] = activation_dropout
A : Union[str, Any] = feat_proj_dropout
A : Union[str, Any] = final_dropout
A : int = layerdrop
A : Optional[Any] = layer_norm_eps
A : Any = initializer_range
A : Tuple = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A : Optional[Any] = apply_spec_augment
A : Optional[Any] = mask_time_prob
A : Union[str, Any] = mask_time_length
A : Optional[Any] = mask_time_min_masks
A : str = mask_feature_prob
A : Tuple = mask_feature_length
A : Any = mask_feature_min_masks
# ctc loss
A : List[Any] = ctc_loss_reduction
A : Dict = ctc_zero_infinity
# sequence classification
A : int = use_weighted_layer_sum
A : Optional[int] = classifier_proj_size
@property
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
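# --- Usage sketch (added by the editor; not part of the original file) ---
# The property above multiplies the conv strides together, i.e. the total
# downsampling from raw audio samples to encoder frames. Assuming this is
# transformers' SEWConfig (real name assumed):
def _sew_stride_demo() -> int:
    from transformers import SEWConfig
    # the default strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) multiply to 320,
    # i.e. one encoder frame per 320 samples (20 ms of 16 kHz audio)
    return SEWConfig().inputs_to_logits_ratio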
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze):
    '''simple docstring'''
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved
def run_maze(maze, i, j, solutions):
    '''simple docstring'''
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
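# --- Usage sketch (added by the editor; not part of the original file) ---
# A 0 marks a free cell and a 1 a wall; the solver walks from (0, 0) to the
# bottom-right corner and prints the visited-cell matrix:
def _maze_demo() -> bool:
    maze = [[0, 1, 0],
            [0, 0, 0],
            [1, 0, 0]]
    return solve_maze(maze)  # True; a path exists along the free cells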
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Dict = SwinConfig()
A : List[Any] = swin_name.split('''_''' )
A : Tuple = name_split[1]
A : Union[str, Any] = int(name_split[4] )
A : str = int(name_split[3][-1] )
if model_size == "tiny":
A : Optional[int] = 96
A : Optional[Any] = (2, 2, 6, 2)
A : Any = (3, 6, 12, 24)
elif model_size == "small":
A : Optional[int] = 96
A : str = (2, 2, 18, 2)
A : Tuple = (3, 6, 12, 24)
elif model_size == "base":
A : int = 128
A : Optional[Any] = (2, 2, 18, 2)
A : List[str] = (4, 8, 16, 32)
else:
A : Dict = 192
A : Optional[Any] = (2, 2, 18, 2)
A : Optional[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
A : Dict = 2_1841
else:
A : str = 1000
A : List[str] = '''huggingface/label-files'''
A : Any = '''imagenet-1k-id2label.json'''
A : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) )
A : str = {int(snake_case__ ): v for k, v in idalabel.items()}
A : Tuple = idalabel
A : Tuple = {v: k for k, v in idalabel.items()}
A : Tuple = img_size
A : Dict = num_classes
A : Optional[Any] = embed_dim
A : str = depths
A : str = num_heads
A : Optional[int] = window_size
return config
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
if "patch_embed.proj" in name:
A : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
A : Tuple = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
A : Optional[int] = '''encoder.''' + name
if "attn.proj" in name:
A : List[str] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
A : List[str] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
A : Any = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
A : Tuple = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
A : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
A : str = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "norm.weight":
A : Tuple = '''layernorm.weight'''
if name == "norm.bias":
A : Tuple = '''layernorm.bias'''
if "head" in name:
A : Any = name.replace('''head''' , '''classifier''' )
else:
A : List[Any] = '''swin.''' + name
return name
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
A : Dict = orig_state_dict.pop(snake_case__ )
if "mask" in key:
continue
elif "qkv" in key:
A : Dict = key.split('''.''' )
A : Optional[int] = int(key_split[1] )
A : List[str] = int(key_split[3] )
A : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A : Any = val[:dim, :]
A : Dict = val[
dim : dim * 2, :
]
A : List[str] = val[-dim:, :]
else:
A : Any = val[
:dim
]
A : Optional[int] = val[
dim : dim * 2
]
A : Any = val[
-dim:
]
else:
A : str = val
return orig_state_dict
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : Tuple = timm.create_model(snake_case__ , pretrained=snake_case__ )
timm_model.eval()
A : Optional[Any] = get_swin_config(snake_case__ )
A : Optional[int] = SwinForImageClassification(snake_case__ )
model.eval()
A : List[str] = convert_state_dict(timm_model.state_dict() , snake_case__ )
model.load_state_dict(snake_case__ )
A : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A : Any = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) )
A : List[Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
A : List[Any] = image_processor(images=snake_case__ , return_tensors='''pt''' )
A : Any = timm_model(inputs['''pixel_values'''] )
A : Optional[Any] = model(**snake_case__ ).logits
assert torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swin_name',
default='swin_tiny_patch4_window7_224',
type=str,
help='Name of the Swin timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowercase : int = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class A ( __snake_case ):
__magic_name__ = '''M-CLIP'''
def __init__( self , SCREAMING_SNAKE_CASE=1024 , SCREAMING_SNAKE_CASE=768 , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
A : Tuple = transformerDimSize
A : Optional[Any] = imageDimSize
super().__init__(**SCREAMING_SNAKE_CASE )
class A ( __snake_case ):
__magic_name__ = MCLIPConfig
def __init__( self , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : List[Any] = XLMRobertaModel(SCREAMING_SNAKE_CASE )
A : str = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
A : Optional[Any] = self.transformer(input_ids=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )[0]
A : Optional[Any] = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(SCREAMING_SNAKE_CASE ), embs
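# --- Worked example (added by the editor; not part of the original file) ---
# The forward above mean-pools token embeddings under the attention mask so that
# padding positions contribute nothing. A standalone sketch of that pooling:
def _masked_mean_pool_demo() -> torch.Tensor:
    embs = torch.tensor([[[1.0, 1.0], [3.0, 3.0], [9.0, 9.0]]])  # (batch, seq, dim)
    mask = torch.tensor([[1, 1, 0]])                             # last token is padding
    return (embs * mask.unsqueeze(2)).sum(dim=1) / mask.sum(dim=1)[:, None]  # [[2., 2.]]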
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Optional[int] = logging.get_logger(__name__)
lowercase : Tuple = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class A ( __snake_case ):
__magic_name__ = '''pix2struct_text_model'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {
'''hidden_size''': '''hidden_size''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , SCREAMING_SNAKE_CASE=50244 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
A : str = vocab_size
A : List[str] = hidden_size
A : List[Any] = d_kv
A : Optional[Any] = d_ff
A : Dict = num_layers
A : Dict = num_heads
A : Optional[int] = relative_attention_num_buckets
A : Optional[Any] = relative_attention_max_distance
A : Dict = dropout_rate
A : Dict = layer_norm_epsilon
A : Tuple = initializer_factor
A : Union[str, Any] = use_cache
A : int = eos_token_id
A : List[str] = decoder_start_token_id
# for backwards compatibility
A : int = dense_act_fn
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , tie_word_embeddings=SCREAMING_SNAKE_CASE , is_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE )
A, A : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
A : Union[str, Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class A ( __snake_case ):
__magic_name__ = '''pix2struct_vision_model'''
def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=1e-10 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , **SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
A : List[str] = hidden_size
A : Optional[Any] = patch_embed_hidden_size
A : Union[str, Any] = d_ff
A : Dict = dropout_rate
A : str = num_hidden_layers
A : Dict = num_attention_heads
A : Tuple = initializer_range
A : List[str] = initializer_factor
A : Union[str, Any] = attention_dropout
A : Tuple = layer_norm_eps
A : int = dense_act_fn
A : Optional[int] = seq_len
A : Tuple = relative_attention_num_buckets
A : str = relative_attention_max_distance
A : Optional[Any] = d_kv
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE )
A, A : int = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
A : Optional[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class A ( __snake_case ):
__magic_name__ = '''pix2struct'''
__magic_name__ = True
def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
super().__init__(tie_word_embeddings=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if text_config is None:
A : Dict = {}
logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' )
if vision_config is None:
A : str = {}
logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' )
A : Dict = PixaStructTextConfig(**SCREAMING_SNAKE_CASE )
A : Any = PixaStructVisionConfig(**SCREAMING_SNAKE_CASE )
A : Any = self.text_config.decoder_start_token_id
A : Any = self.text_config.pad_token_id
A : Dict = self.text_config.eos_token_id
A : Union[str, Any] = initializer_factor
A : Tuple = initializer_range
A : Optional[Any] = self.initializer_range
A : int = self.initializer_range
A : Tuple = is_vqa
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Tuple = copy.deepcopy(self.__dict__ )
A : Dict = self.text_config.to_dict()
A : int = self.vision_config.to_dict()
A : Any = self.__class__.model_type
return output
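# --- Usage sketch (added by the editor; not part of the original file) ---
# Assuming these are transformers' Pix2Struct configs (real names assumed), the
# composite config is built from its two sub-configs via the classmethod above:
def _pix2struct_config_demo() -> str:
    from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig
    cfg = Pix2StructConfig.from_text_vision_configs(Pix2StructTextConfig(), Pix2StructVisionConfig())
    return cfg.to_dict()["model_type"]  # "pix2struct"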
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class A ( __snake_case ):
__magic_name__ = (CMStochasticIterativeScheduler,)
__magic_name__ = 10
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
A : Optional[Any] = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Optional[Any] = 10
A : Union[str, Any] = self.get_scheduler_config()
A : Optional[Any] = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
A : List[Any] = scheduler.timesteps[0]
A : List[Any] = scheduler.timesteps[1]
A : List[str] = self.dummy_sample
A : List[str] = 0.1 * sample
A : List[str] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
A : Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : List[Any] = self.scheduler_classes[0]
A : int = self.get_scheduler_config()
A : int = scheduler_class(**SCREAMING_SNAKE_CASE )
A : Tuple = 1
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
A : int = scheduler.timesteps
A : int = torch.manual_seed(0 )
A : Optional[int] = self.dummy_model()
A : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(SCREAMING_SNAKE_CASE ):
# 1. scale model input
A : int = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 2. predict noise residual
A : Tuple = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
A : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
A : Any = pred_prev_sample
A : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
A : Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Union[str, Any] = self.scheduler_classes[0]
A : Tuple = self.get_scheduler_config()
A : int = scheduler_class(**SCREAMING_SNAKE_CASE )
A : int = [106, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
A : str = scheduler.timesteps
A : Any = torch.manual_seed(0 )
A : Optional[Any] = self.dummy_model()
A : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
A : List[str] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 2. predict noise residual
A : Tuple = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
A : Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
A : List[Any] = pred_prev_sample
A : Dict = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
A : Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : str = self.scheduler_classes[0]
A : Any = self.get_scheduler_config()
A : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
A : str = [39, 30, 12, 15, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : Tuple = self.scheduler_classes[0]
A : Dict = self.get_scheduler_config()
A : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
A : Optional[int] = [39, 30, 12, 1, 0]
A : Optional[int] = len(SCREAMING_SNAKE_CASE )
with self.assertRaises(SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : List[str] = self.scheduler_classes[0]
A : List[str] = self.get_scheduler_config()
A : Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
A : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            SCREAMING_SNAKE_CASE , msg=F'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
'''simple docstring'''
from __future__ import annotations
def prime_factors(n):
    '''simple docstring'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
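# --- Usage sketch (added by the editor; not part of the original file) ---
# Trial division emits each prime as many times as it divides the input:
def _prime_factors_demo() -> bool:
    factors = prime_factors(360)  # [2, 2, 2, 3, 3, 5]
    product = 1
    for factor in factors:
        product *= factor
    return product == 360  # the factors always multiply back to the input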
'''simple docstring'''
import math
def decimal_to_octal(num):
    '''simple docstring'''
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return F'0o{int(octal)}'
def main():
'''simple docstring'''
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(216 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(512 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
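# --- Note (added by the editor; not part of the original file) ---
# Python's built-in oct() produces the same '0o...' string directly:
def _octal_demo() -> bool:
    return decimal_to_octal(65) == oct(65) == '0o101'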
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    '''simple docstring'''
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()
def reverse_floyd(n):
    '''simple docstring'''
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')
def pretty_print(n):
    '''simple docstring'''
    if n <= 0:
        print(' ... .... nothing printing :(')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
    print(R'| /\ | |- | |- |--| |\ /| |-')
    print(R'|/ \| |- |_ |_ |__| | \/ | |_')
    K = 1
    while K:
        user_number = int(input('enter the number and see the magic : '))
        print()
        pretty_print(user_number)
        K = int(input('press 0 to exit... and 1 to continue...'))
    print('Good Bye...')
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Tuple = np.inf
def set_batch_size(snake_case__ ) -> None:
nonlocal batch_size
if isinstance(snake_case__ , snake_case__ ):
A : Any = min(snake_case__ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(snake_case__ , snake_case__ ):
A : Any = min(snake_case__ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(snake_case__ , snake_case__ ) and feature.dtype == "binary":
A : List[str] = min(snake_case__ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(snake_case__ , snake_case__ )
return None if batch_size is np.inf else batch_size
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE , streaming=SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
A : Optional[int] = path_or_paths if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else {self.split: path_or_paths}
A : Any = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
A : List[Any] = Parquet(
cache_dir=SCREAMING_SNAKE_CASE , data_files=SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , hash=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
if self.streaming:
A : Optional[int] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A : Any = None
A : Optional[Any] = None
A : Tuple = None
A : int = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE , download_mode=SCREAMING_SNAKE_CASE , verification_mode=SCREAMING_SNAKE_CASE , base_path=SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
A : Union[str, Any] = self.builder.as_dataset(
split=self.split , verification_mode=SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory )
return dataset
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
A : str = dataset
A : Optional[int] = path_or_buf
A : Any = batch_size or get_writer_batch_size(dataset.features )
A : Union[str, Any] = parquet_writer_kwargs
    def write(self) -> int:
        """simple docstring"""
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, 'wb+') as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written
    def _write(self, file_obj, batch_size, **parquet_writer_kwargs) -> int:
        """simple docstring"""
        written = 0
        _ = parquet_writer_kwargs.pop('path_or_buf', None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating parquet from Arrow format', ):
            batch = query_table(
                table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None, )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
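# Hedged round-trip sketch for the reader/writer pair above (the in-memory
# dataset and file name are illustrative, not from the source):
#
#     ds = Dataset.from_dict({'x': [1, 2, 3]})
#     n_bytes = ParquetDatasetWriter(ds, 'out.parquet').write()
#     ds_again = ParquetDatasetReader('out.parquet', split=NamedSplit('train')).read()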
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , **SCREAMING_SNAKE_CASE , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
A : List[Any] = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=SCREAMING_SNAKE_CASE , )
A : Optional[Any] = image.to(self.device )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
A : Tuple = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
A : List[Any] = self.scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
A : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A : List[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE ), "This is a local test"
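# The extra "This is a local test" return value is this fixture's fingerprint:
# diffusers' custom-pipeline tests assert on that second element to prove the
# locally defined class, not a hub builtin, was loaded. A hedged invocation
# sketch (unet and scheduler are assumed compatible, e.g. UNet2DModel + a DDPM scheduler):
#
#     pipe = CustomLocalPipeline(unet=unet, scheduler=scheduler)
#     output, marker = pipe(batch_size=1, num_inference_steps=50)
#     assert marker == "This is a local test"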
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_byt5': ['ByT5Tokenizer']}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , ) -> str:
"""simple docstring"""
A : Any = parent
A : List[Any] = batch_size
A : Union[str, Any] = seq_length
A : Any = is_training
A : int = use_input_mask
A : Union[str, Any] = vocab_size
A : List[Any] = hidden_size
A : List[Any] = num_hidden_layers
A : Optional[int] = num_attention_heads
A : str = intermediate_size
A : Tuple = hidden_act
A : Union[str, Any] = hidden_dropout_prob
A : Union[str, Any] = attention_probs_dropout_prob
A : int = max_position_embeddings
A : Optional[int] = initializer_range
A : Any = use_labels
A : Optional[int] = scope
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.get_config()
return config, input_ids, input_mask, token_labels
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
A : List[str] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
A : int = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
"""simple docstring"""
A : List[str] = True
A : Union[str, Any] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , )
A : List[Any] = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = True
A : Tuple = True
A : Optional[int] = BertGenerationDecoder(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval()
# first forward pass
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , )
A : Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
A : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
A : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
A : Any = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
# select random slice
A : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
A : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
A : Optional[Any] = BertGenerationDecoder(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
__magic_name__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__magic_name__ = (BertGenerationDecoder,) if is_torch_available() else ()
__magic_name__ = (
{'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : List[str] = BertGenerationEncoderTester(self )
A : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = 'bert'
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Optional[Any] = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Tuple = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
A : Optional[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
A : Dict = model(SCREAMING_SNAKE_CASE )[0]
A : Optional[Any] = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
A : Dict = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Optional[Any] = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
A : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
A : Optional[Any] = model(SCREAMING_SNAKE_CASE )[0]
A : Optional[Any] = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
A : Any = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
'''simple docstring'''
import datasets
_CITATION = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_DESCRIPTION = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n    predictions: Predicted labels.\n    references: Ground truth labels.\nReturns:\n    \'accuracy\': accuracy\nExamples:\n\n    >>> predictions = [0, 1]\n    >>> references = [0, 1]\n    >>> xnli_metric = datasets.load_metric("xnli")\n    >>> results = xnli_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
def simple_accuracy(preds, labels):
    '''simple docstring'''
    return (preds == labels).mean()
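# Worked example (illustrative): preds = np.array([0, 1, 1]) against
# labels = np.array([0, 1, 0]) gives (preds == labels) == [True, True, False],
# whose mean is 2/3 ≈ 0.667 — the accuracy this metric reports.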
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs))
def softmax(_outputs):
    '''simple docstring'''
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
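# Subtracting the row-wise max keeps exp() finite; softmax is shift-invariant,
# so the result is unchanged. Illustrative check:
#
#     softmax(np.array([[1000.0, 1001.0]]))  # -> [[0.2689, 0.7311]], no overflow
#     # a naive np.exp([1000., 1001.]) would return [inf, inf] and yield nan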
class ClassificationFunction(ExplicitEnum):
    SIGMOID = 'sigmoid'
    SOFTMAX = 'softmax'
    NONE = 'none'
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__( self , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k='', **tokenizer_kwargs):
        """simple docstring"""
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, 'return_all_scores') and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params['top_k'] = top_k
            postprocess_params['_legacy'] = False
        elif return_all_scores is not None:
            warnings.warn(
                '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
                ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.', UserWarning, )
            if return_all_scores:
                postprocess_params['top_k'] = None
            else:
                postprocess_params['top_k'] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params['function_to_apply'] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        """simple docstring"""
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = 'top_k' not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        """simple docstring"""
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.')
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        """simple docstring"""
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        """simple docstring"""
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, 'function_to_apply') and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs['logits'][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' )
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {'label': self.model.config.id2label[i], 'score': score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
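# Hedged usage sketch: this class backs `pipeline("text-classification", ...)`;
# the checkpoint and label names below are illustrative, not fixed by this file:
#
#     clf = pipeline('text-classification', model='distilbert-base-uncased-finetuned-sst-2-english')
#     clf('I love this.')              # -> [{'label': 'POSITIVE', 'score': 0.99...}]
#     clf('I love this.', top_k=None)  # every label, sorted by score (non-legacy path)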
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_bigcode'] = [
        'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GPTBigCodeForSequenceClassification',
        'GPTBigCodeForTokenClassification',
        'GPTBigCodeForCausalLM',
        'GPTBigCodeModel',
        'GPTBigCodePreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
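# What the lazy pattern above buys (a sketch; the module path is assumed from
# the file layout): importing the package is cheap, and the first attribute
# access triggers the real submodule import.
#
#     from transformers.models.gpt_bigcode import GPTBigCodeConfig  # configuration module imported only now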
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    '''simple docstring'''
    url = F'https://www.amazon.in/laptop/s?k={product}'
    header = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
        'Accept-Language': 'en-US, en;q=0.5',
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text, 'html.parser')
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
        try:
            product_title = item.h2.text
            product_link = 'https://www.amazon.in/' + item.h2.a['href']
            product_price = item.find('span', attrs={'class': 'a-offscreen'}).text
            try:
                product_rating = item.find('span', attrs={'class': 'a-icon-alt'}).text
            except AttributeError:
                product_rating = 'Not available'
            try:
                product_mrp = (
                    '₹'
                    + item.find(
                        'span', attrs={'class': 'a-price a-text-price'}).text.split('₹')[1]
                )
            except AttributeError:
                product_mrp = ''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('₹').replace(',', ''))
                            - float(product_price.strip('₹').replace(',', ''))
                        )
                        / float(product_mrp.strip('₹').replace(',', ''))
                    )
                    * 100)
            except ValueError:
                discount = float('nan')
        except AttributeError:
            pass
        data_frame.loc[data_frame.index] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[
            data_frame['Current Price of the product'] > data_frame['MRP of the product'],
            'MRP of the product',
        ] = ' '
        data_frame.loc[
            data_frame['Current Price of the product'] > data_frame['MRP of the product'],
            'Current Price of the product',
        ] = ' '
        data_frame.index += 1
return data_frame
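# Discount arithmetic, worked once (numbers are illustrative): an MRP of
# '₹50,000' and a price of '₹42,500' clean up to 50000.0 and 42500.0, so the
# discount is (50000 - 42500) / 50000 * 100 = 15.0 percent; an empty MRP makes
# float() raise ValueError and the row records NaN instead.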
if __name__ == "__main__":
lowercase : Union[str, Any] = 'headphones'
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def fetch_jobs(location: str = 'mumbai') -> Generator[tuple[str, str], None, None]:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url + location).content, 'html.parser')
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
        job_title = job.find('a', attrs={'data-tn-element': 'jobTitle'}).text.strip()
        company_name = job.find('span', {'class': 'company'}).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    '''simple docstring'''
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens once |z| exceeds 2, i.e. when a*a + b*b > 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
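# The loop iterates z -> z*z + c with c = x + i*y, carried in real arithmetic:
# Re(z*z + c) = a*a - b*b + x and Im(z*z + c) = 2*a*b + y. The iteration is
# seeded at z = c, so for c = 1 (illustrative) the orbit runs 1 -> 2 -> 5, and
# 5*5 > 4 trips the escape test at step 1: the point leaves the set almost at once.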
def get_black_and_white_rgb(distance: float) -> tuple:
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True, ) -> Image.Image:
    '''simple docstring'''
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
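# The coordinate mapping preserves aspect ratio: figure_height is
# figure_width * image_height / image_width, so the default 800x600 view of
# width 3.2 spans 2.4 vertically, and pixel (0, 0) lands on
# (figure_center_x - 1.6, figure_center_y - 1.2), the top-left of the window.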
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = 'bert-base-cased'
FP16 = 'fp16'
BF16 = 'bf16'
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class A ( __snake_case ):
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
super().setUp()
A : List[Any] = dict(
ACCELERATE_USE_FSDP='''true''' , MASTER_ADDR='''localhost''' , MASTER_PORT='''10999''' , RANK='''0''' , LOCAL_RANK='''0''' , WORLD_SIZE='''1''' , )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
A : List[Any] = self.dist_env.copy()
A : Any = F'{i + 1}'
A : List[Any] = strategy
with mockenv_context(**SCREAMING_SNAKE_CASE ):
A : int = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
A : Optional[Any] = self.dist_env.copy()
A : List[Any] = prefetch_policy
with mockenv_context(**SCREAMING_SNAKE_CASE ):
A : Union[str, Any] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
A : Optional[Any] = self.dist_env.copy()
A : Tuple = state_dict_type
with mockenv_context(**SCREAMING_SNAKE_CASE ):
A : Dict = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
for policy in FSDP_AUTO_WRAP_POLICY:
A : Tuple = self.dist_env.copy()
A : List[Any] = policy
if policy == "TRANSFORMER_BASED_WRAP":
A : Dict = '''BertLayer'''
elif policy == "SIZE_BASED_WRAP":
A : Any = '''2000'''
with mockenv_context(**SCREAMING_SNAKE_CASE ):
A : str = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
A : Any = self.dist_env.copy()
A : List[Any] = '''TRANSFORMER_BASED_WRAP'''
A : Optional[int] = '''T5Layer'''
with mockenv_context(**SCREAMING_SNAKE_CASE ):
A : List[Any] = FullyShardedDataParallelPlugin()
with self.assertRaises(SCREAMING_SNAKE_CASE ) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) )
A : int = self.dist_env.copy()
A : Any = '''SIZE_BASED_WRAP'''
A : Optional[int] = '''0'''
with mockenv_context(**SCREAMING_SNAKE_CASE ):
A : str = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
A : Optional[int] = self.dist_env.copy()
A : Any = mp_dtype
with mockenv_context(**SCREAMING_SNAKE_CASE ):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
A : List[str] = self.dist_env.copy()
A : Dict = str(SCREAMING_SNAKE_CASE ).lower()
with mockenv_context(**SCREAMING_SNAKE_CASE ):
A : List[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=SCREAMING_SNAKE_CASE ) )
@require_fsdp
@require_multi_gpu
@slow
class A ( __snake_case ):
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
super().setUp()
A : Tuple = 0.82
A : Optional[int] = [
'''fsdp_shard_grad_op_transformer_based_wrap''',
'''fsdp_full_shard_transformer_based_wrap''',
]
A : str = {
'''multi_gpu_fp16''': 3200,
'''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 2000,
'''fsdp_full_shard_transformer_based_wrap_fp16''': 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
A : Union[str, Any] = 160
A : Any = 160
A : Union[str, Any] = inspect.getfile(accelerate.test_utils )
A : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : List[Any] = os.path.join(self.test_scripts_folder , '''test_performance.py''' )
A : Any = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''']
for config in self.performance_configs:
A : str = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in config:
cmd_config.append(F'--fsdp_sharding_strategy={i+1}' )
break
if "fp32" in config:
cmd_config.append('''--mixed_precision=no''' )
else:
cmd_config.append('''--mixed_precision=fp16''' )
if "cpu_offload" in config:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'--fsdp_auto_wrap_policy={policy}' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
F'--performance_lower_bound={self.performance_lower_bound}',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE , env=os.environ.copy() )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Any = os.path.join(self.test_scripts_folder , '''test_checkpointing.py''' )
A : Dict = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
'''--use_fsdp''',
'''--mixed_precision=fp16''',
'''--fsdp_transformer_layer_cls_to_wrap=BertLayer''',
]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
A : List[str] = cmd.copy()
cmd_config.append(F'--fsdp_sharding_strategy={i+1}' )
if strategy != "FULL_SHARD":
continue
A : int = len(SCREAMING_SNAKE_CASE )
for state_dict_type in FSDP_STATE_DICT_TYPE:
A : Optional[int] = cmd_config[:state_dict_config_index]
cmd_config.append(F'--fsdp_state_dict_type={state_dict_type}' )
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
'''--partial_train_epoch=1''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE , env=os.environ.copy() )
A : List[str] = cmd_config[:-1]
A : Dict = os.path.join(self.tmpdir , '''epoch_0''' )
cmd_config.extend(
[
F'--resume_from_checkpoint={resume_from_checkpoint}',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE , env=os.environ.copy() )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Tuple = os.path.join(self.test_scripts_folder , '''test_peak_memory_usage.py''' )
A : Any = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
A : Tuple = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['''--mixed_precision=fp16'''] )
else:
cmd_config.extend(['''--mixed_precision=no'''] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['''--use_fsdp'''] )
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in spec:
cmd_config.append(F'--fsdp_sharding_strategy={i+1}' )
break
if "cpu_offload" in spec:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'--fsdp_auto_wrap_policy={policy}' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
F'--peak_memory_upper_bound={peak_mem_upper_bound}',
F'--n_train={self.n_train}',
F'--n_val={self.n_val}',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE , env=os.environ.copy() )
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowercase : Optional[int] = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def test_custom_files_are_present(transformers_path):
    '''simple docstring'''
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
lowercase : str = argparse.ArgumentParser()
parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
lowercase : Optional[Any] = parser.parse_args()
if args.check_lib:
lowercase : List[Any] = importlib.import_module('transformers')
lowercase : str = Path(transformers_module.__file__).parent
else:
lowercase : List[Any] = Path.cwd() / 'build/lib/transformers'
if not test_custom_files_are_present(transformers_path):
raise ValueError('The built release does not contain the custom files. Fix this before going further!')
'''simple docstring'''
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None, ):
    '''simple docstring'''
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            'Expected the same number of rows for A and B. '
            F'Instead found A of size {shape_a} and B of size {shape_b}'
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            'Expected the same number of columns for B and C. '
            F'Instead found B of size {shape_b} and C of size {shape_c}'
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                'Input matrix A is not invertible. Cannot compute Schur complement.')
    return mat_c - mat_b.T @ a_inv @ mat_b
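# Identity exercised by the tests below: for the block matrix M = [[A, B], [B^T, C]]
# with A invertible, det(M) = det(A) * det(S), where S = C - B^T A^{-1} B is the
# value returned here. Tiny illustrative check:
#
#     a = np.array([[2.0]]); b = np.array([[1.0]]); c = np.array([[3.0]])
#     schur_complement(a, b, c)  # -> [[2.5]], and det([[2, 1], [1, 3]]) = 5 = 2 * 2.5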
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> None:
"""simple docstring"""
A : Optional[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
A : Any = np.array([[0, 3], [3, 0], [2, 3]] )
A : Optional[int] = np.array([[2, 1], [6, 3]] )
A : int = schur_complement(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Union[str, Any] = np.block([[a, b], [b.T, c]] )
A : Any = np.linalg.det(SCREAMING_SNAKE_CASE )
A : List[str] = np.linalg.det(SCREAMING_SNAKE_CASE )
A : Tuple = np.linalg.det(SCREAMING_SNAKE_CASE )
self.assertAlmostEqual(SCREAMING_SNAKE_CASE , det_a * det_s )
def __lowerCAmelCase ( self ) -> None:
"""simple docstring"""
A : Union[str, Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
A : str = np.array([[0, 3], [3, 0], [2, 3]] )
A : Tuple = np.array([[2, 1], [6, 3]] )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
schur_complement(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> None:
"""simple docstring"""
A : Dict = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
A : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
A : int = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
schur_complement(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=2 , ) -> List[str]:
"""simple docstring"""
A : List[str] = parent
A : Optional[Any] = batch_size
A : Tuple = image_size
A : int = patch_size
A : Optional[int] = num_channels
A : str = is_training
A : List[Any] = use_labels
A : Any = hidden_size
A : Any = num_hidden_layers
A : Optional[int] = num_attention_heads
A : Any = intermediate_size
A : List[str] = hidden_act
A : str = hidden_dropout_prob
A : Tuple = attention_probs_dropout_prob
A : Any = type_sequence_label_size
A : Optional[int] = initializer_range
A : Dict = scope
A : Tuple = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
A : List[Any] = (image_size // patch_size) ** 2
A : Tuple = num_patches + 2
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
A : Any = TFDeiTModel(config=SCREAMING_SNAKE_CASE )
A : str = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
A : Tuple = TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE )
A : List[Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A : Optional[int] = 1
A : str = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE )
A : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Tuple = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
A : str = self.type_sequence_label_size
A : Optional[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE )
A : Optional[Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A : Optional[Any] = 1
A : List[str] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE )
A : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Optional[int] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class A ( __snake_case , __snake_case , unittest.TestCase ):
__magic_name__ = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__magic_name__ = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Tuple = TFDeiTModelTester(self )
A : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
pass
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A, A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Any = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
A : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Dense ) )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A, A : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Any = model_class(SCREAMING_SNAKE_CASE )
A : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Union[str, Any] = [*signature.parameters.keys()]
A : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Tuple:
"""simple docstring"""
A : Union[str, Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : List[str] = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class A ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Union[str, Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
A : Dict = self.default_image_processor
A : List[str] = prepare_img()
A : Any = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
# forward pass
A : Optional[int] = model(**SCREAMING_SNAKE_CASE )
# verify the logits
A : List[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
A : str = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowercase : Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
__magic_name__ = ['''pixel_values''']
def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , **SCREAMING_SNAKE_CASE , ) -> None:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
A : Dict = size if size is not None else {'''shortest_edge''': 224}
A : Dict = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
A : int = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
A : Any = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
A : Optional[Any] = do_resize
A : Union[str, Any] = size
A : Optional[Any] = resample
A : int = do_center_crop
A : str = crop_size
A : List[str] = do_rescale
A : Dict = rescale_factor
A : Optional[Any] = do_normalize
A : str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A : Union[str, Any] = image_std if image_std is not None else OPENAI_CLIP_STD
A : int = do_convert_rgb
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
A : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A : str = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE )
return resize(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
A : int = get_size_dict(SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> Tuple:
"""simple docstring"""
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
    def preprocess(
        self,
        images,
        do_resize = None,
        size = None,
        resample = None,
        do_center_crop = None,
        crop_size = None,
        do_rescale = None,
        rescale_factor = None,
        do_normalize = None,
        image_mean = None,
        image_std = None,
        do_convert_rgb = None,
        return_tensors = None,
        data_format = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the configured resize/crop/rescale/normalize pipeline to one or more images."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
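# For intuition, here is the same shortest-edge-resize -> center-crop ->
# rescale -> normalize pipeline applied by hand to one numpy image. This is a
# minimal sketch, not the class above: `preprocess_by_hand` is a hypothetical
# helper, the resize is nearest-neighbour to keep the sketch dependency-free,
# and the constants are the published OpenAI CLIP mean/std values.
import numpy as np

_CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
_CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])

def preprocess_by_hand(img: np.ndarray, crop: int = 224) -> np.ndarray:
    h, w, _ = img.shape
    scale = crop / min(h, w)  # shortest edge -> crop size
    ys = np.clip((np.arange(round(h * scale)) / scale).astype(int), 0, h - 1)
    xs = np.clip((np.arange(round(w * scale)) / scale).astype(int), 0, w - 1)
    img = img[ys][:, xs]  # nearest-neighbour resize
    h, w, _ = img.shape
    top, left = (h - crop) // 2, (w - crop) // 2
    img = img[top : top + crop, left : left + crop]  # center crop
    img = img.astype(np.float32) / 255.0  # rescale to [0, 1]
    img = (img - _CLIP_MEAN) / _CLIP_STD  # normalize per channel
    return img.transpose(2, 0, 1)  # channels-first, like ChannelDimension.FIRST

# usage: preprocess_by_hand(np.zeros((480, 640, 3), dtype=np.uint8)).shape == (3, 224, 224)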
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
    'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_cpmant'] = [
        'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CpmAntForCausalLM',
        'CpmAntModel',
        'CpmAntPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
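# For reference, a stripped-down version of the lazy-import machinery used
# above (a sketch with only the standard library; the real _LazyModule also
# handles __dir__, pickling, and import errors): attribute access triggers the
# actual submodule import.
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute back to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)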
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased"):
    """Create the train/eval dataloaders for GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train on MRPC and track the best eval accuracy across epochs."""
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
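# The gather-then-trim step in the eval loop above is easy to get wrong, so
# here it is in isolation: in a distributed run the last batch is padded with
# duplicate samples so every process has equal work, and only
# `dataset_len - samples_seen` of the gathered entries are real. A minimal
# sketch, independent of Accelerate.
import torch

def trim_gathered(predictions, references, samples_seen, dataset_len, is_last_batch):
    # Drop the padded duplicates on the final batch; otherwise just count samples.
    if is_last_batch:
        keep = dataset_len - samples_seen
        return predictions[:keep], references[:keep], dataset_len
    return predictions, references, samples_seen + references.shape[0]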
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    """A pathfinding node carrying its position, the goal, the cost so far, and a parent link."""

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """The heuristic here is the Manhattan Distance from the node to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    """Greedy best-first search: always expands the open node with the lowest heuristic cost."""

    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of successors (both in the grid and free spaces)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from parents to parents until the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
lowercase : Tuple = (0, 0)
lowercase : List[str] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('------')
lowercase : int = GreedyBestFirst(init, goal)
lowercase : Union[str, Any] = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowercase : Dict = 2
for elem in grid:
print(elem)
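# Performance note: search() above re-sorts the whole open list on every
# iteration. Because Node orders by f_cost via __lt__, Python's heapq would
# give the same "expand the most promising node first" behaviour in O(log n)
# per operation. A minimal sketch of the swap:
#
#     import heapq
#     open_nodes = [start_node]
#     heapq.heapify(open_nodes)
#     current_node = heapq.heappop(open_nodes)  # replaces sort() + pop(0)
#     heapq.heappush(open_nodes, child_node)    # replaces append()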
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide number_of_bytes into the given number of contiguous, 1-indexed byte ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
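# Quick usage check for allocation_num above: ranges are contiguous and
# 1-indexed, and any remainder folds into the last slice.
#
#     allocation_num(100, 4)  ->  ['1-25', '26-50', '51-75', '76-100']
#     allocation_num(10, 3)   ->  ['1-3', '4-6', '7-10']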
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the block delimited by `start_prompt`/`end_prompt` in a file, returning the text, its line indices, and all lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Strip leading/trailing blank lines from the block.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """Return the up-to-date list of models supporting a task guide, formatted as doc links."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Compare the model list in a task guide with the current one, overwriting it if asked."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
lowercase : Dict = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowercase : List[Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
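# The marker-delimited replace performed above, reduced to its core (a
# stand-alone sketch; the real version works line-wise and skips blank lines):
def replace_between(text: str, start: str, end: str, new_body: str) -> str:
    """Replace everything between the first `start` marker and the next `end` marker."""
    i = text.index(start) + len(start)
    j = text.index(end, i)
    return text[:i] + "\n" + new_body + "\n" + text[j:]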
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    """Toy tool used by the interpreter tests below."""
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
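# Stand-alone sanity check of the interpreter under test (a minimal sketch,
# mirroring the cases above): `evaluate` executes a restricted Python snippet,
# mutates `state` in place, and returns the value of the last statement.
#
#     state = {}
#     evaluate("x = 3\ny = x + 1", {}, state=state)   # -> 4
#     state                                           # -> {"x": 3, "y": 4}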
'''simple docstring'''
def heaps(arr: list) -> list:
    """Pure-Python Heap's algorithm: return all permutations of a list as tuples."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
lowercase : List[str] = input('Enter numbers separated by a comma:\n').strip()
lowercase : int = [int(item) for item in user_input.split(',')]
print(heaps(arr))
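# Heap's algorithm emits each permutation via a single swap per step; for a
# 3-element input the exact order produced by the implementation above is:
#
#     heaps([1, 2, 3])
#     -> [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]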
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    """Build an ALBERT model from a config, load the TF weights, and save a PyTorch checkpoint."""
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
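# Typical invocation (a sketch; the script name and checkpoint paths below are
# placeholders, not files shipped with this dump):
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin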
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs)
        return config
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
A : List[Any] = dict(self.forward_default_kwargs )
A : Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
A : Optional[Any] = self.dummy_sample
A : int = 0.1 * sample
A : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
A : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
A : List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
A : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
A, A : Tuple = sample, sample
for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
A : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
A : Optional[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : Optional[Any] = dict(self.forward_default_kwargs )
A : Tuple = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
A : List[Any] = self.dummy_sample
A : int = 0.1 * sample
A : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A : Optional[int] = self.get_scheduler_config()
A : Any = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
A : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
A : int = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
A : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
A : List[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Tuple = dict(self.forward_default_kwargs )
A : List[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
A : Dict = self.get_scheduler_config()
A : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
A : Optional[Any] = self.dummy_sample
A : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
A : Tuple = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
A : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
A : List[Any] = scheduler.timesteps[5]
A : Dict = scheduler.timesteps[6]
A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )
A : List[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
A : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
A : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
A : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config )
A : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
A : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
A : Optional[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
A : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , )
A : Dict = self.full_loop(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , )
assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : int = self.full_loop()
A : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : List[Any] = self.full_loop(prediction_type='''v_prediction''' )
A : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.1_014 ) < 1e-3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Dict = self.scheduler_classes[0]
A : List[Any] = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE )
A : Tuple = 10
A : Union[str, Any] = self.dummy_model()
A : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
A : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
assert sample.dtype == torch.float16
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
for scheduler_class in self.scheduler_classes:
A : Dict = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
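# The from_config round-trip exercised above is what lets scheduler algorithms
# be swapped while keeping one config. A stand-alone sketch, using the classes
# imported at the top of this file:
#
#     scheduler = UniPCMultistepScheduler(solver_order=2, solver_type="bh2")
#     swapped = DPMSolverMultistepScheduler.from_config(scheduler.config)  # same config, new algorithm
#     restored = UniPCMultistepScheduler.from_config(swapped.config)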
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    """A pathfinding node with position, goal, g-cost, and an f = g + h total cost."""

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Heuristic for the path: Manhattan or Euclidean, depending on HEURISTIC."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of successors (both in the grid and free spaces)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Retrace the path from parents to parents until the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start, goal) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # Each direction aims at the other search's frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
lowercase : Union[str, Any] = (0, 0)
lowercase : int = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowercase : str = time.time()
lowercase : int = AStar(init, goal)
lowercase : int = a_star.search()
lowercase : str = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
lowercase : List[Any] = time.time()
lowercase : str = BidirectionalAStar(init, goal)
lowercase : Optional[int] = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
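# Note on the HEURISTIC switch above: on this 4-connected grid both choices
# are admissible, but Manhattan distance matches the true per-move cost while
# Euclidean underestimates it, so HEURISTIC == 0 typically expands more nodes.
#
#     dx, dy = 3, 4
#     abs(dx) + abs(dy)        # 7   (Manhattan)
#     (dx**2 + dy**2) ** 0.5   # 5.0 (Euclidean)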
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
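# A usage sketch for the pipeline above ("google/ddpm-cifar10-32" is one public
# unconditional checkpoint; any compatible UNet works, since __init__ converts
# the bundled scheduler to DDIM via from_config):
#
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(num_inference_steps=50, eta=0.0).images[0]
#     image.save("ddim_sample.png")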
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def rename_keys(s_dict):
    """Rename T5X checkpoint keys to the HF SwitchTransformers naming scheme."""
    # 1. in HF T5, "layers_N" becomes "block/N/layer"
    keys = list(s_dict.keys())
    for key in keys:
        layer_pattern = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_pattern, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        encoder_decoder_pattern = r"(encoder|decoder)\/"
        if re.match(encoder_decoder_pattern, key):
            groups = re.match(encoder_decoder_pattern, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def convert_gin_to_config(gin_file, num_experts):
    """Parse a gin config file into a SwitchTransformersConfig."""
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Load a T5X SwitchTransformers checkpoint and save it as a PyTorch model."""
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
' model architecture. If not provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
    args.switch_t5x_checkpoint_path,
    args.config_name,
    args.gin_file,
    args.pytorch_dump_folder_path,
    args.num_experts,
)
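# What rename_keys does to a single checkpoint key, step by step (illustrative
# input key, traced through the two regex passes above):
#
#     "encoder/layers_3/mlp/wi/kernel"
#       -> "encoder/block/3/layer/mlp/wi/kernel"    # layers_N -> block/N/layer
#       -> "encoder/block/3/layer/1/mlp/wi/kernel"  # encoder MLP lives in sub-layer 1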
'''simple docstring'''
from __future__ import annotations
from random import random
class Node:
    """Treap's node: a value plus a random priority, with left and right subtrees."""

    def __init__(self, value=None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value):
    """Split the treap into two: values <= `value` go left, the rest go right."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps, assuming every value in `left` is smaller than any value in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value) -> Node | None:
    """Insert a value by splitting around it and merging the pieces back together."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value) -> Node | None:
    """Erase all nodes carrying the given value."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the treap's values in sorted order."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply `+value` / `-value` commands to the treap."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """Entry point: an interactive treap shell."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good bye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
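# Usage sketch for the treap above: build it with insert, erase one value,
# and print in order (split's "values <= v left / > v right" contract makes
# erase remove every occurrence of v).
#
#     root = None
#     for v in [5, 3, 9, 3]:
#         root = insert(root, v)
#     inorder(root)          # 3,3,5,9,
#     root = erase(root, 3)  # removes every 3
#     inorder(root)          # 5,9,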
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
super().setUp()
A : Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
A : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
A : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
A : Any = {'''unk_token''': '''<unk>'''}
A : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
A : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE ) )
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' )
@cached_property
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' )
@require_torch
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : str = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
A : List[str] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A : int = tokenizer(SCREAMING_SNAKE_CASE , max_length=len(SCREAMING_SNAKE_CASE ) , padding=SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
A : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Test that special tokens are reset
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A : str = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
# check if input_ids are returned and no labels
self.assertIn('''input_ids''' , SCREAMING_SNAKE_CASE )
self.assertIn('''attention_mask''' , SCREAMING_SNAKE_CASE )
self.assertNotIn('''labels''' , SCREAMING_SNAKE_CASE )
self.assertNotIn('''decoder_attention_mask''' , SCREAMING_SNAKE_CASE )
@require_torch
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : List[Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A : List[Any] = tokenizer(text_target=SCREAMING_SNAKE_CASE , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A : Tuple = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Tuple = ['''A long paragraph for summarization.''']
A : Optional[Any] = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A : int = tokenizer(SCREAMING_SNAKE_CASE , text_target=SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
A : Optional[Any] = inputs['''input_ids''']
A : Optional[int] = inputs['''labels''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
pass
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : Union[str, Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : Dict = '''A, <mask> AllenNLP sentence.'''
A : Optional[Any] = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE )
A : Dict = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
A : Any = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
A : Any = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs
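# Worked example of the scaling arithmetic above (illustrative numbers):
# for an h=480, w=640 input and a sampled short-edge size of 800,
# scale = 800 / 480 ≈ 1.667, so (newh, neww) = (800, 1066.67) -> (800, 1067)
# after rounding. If max(newh, neww) exceeded self.max_size, both sides would
# be rescaled by max_size / max(newh, neww), preserving the aspect ratio.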
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
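# A minimal sketch of how these helpers fit together (hypothetical tensors):
# `scales_yx` comes from Preprocess.__call__ above, so multiplying box
# coordinates by it maps detections back toward raw-image coordinates.
#
#     images, sizes, scales_yx = Preprocess(cfg)("input.jpg", single_image=True)
#     boxes = _scale_box(boxes, scales_yx.unsqueeze(0))  # rescale detections
#     _clip_box(boxes, (raw_h, raw_w))                   # clamp into image bounds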
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE y' = f(x, y) with the explicit (forward) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
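# Illustrative check: for y' = y with y(0) = 1 integrated to x = 1, the exact
# answer is e ≈ 2.71828; explicit Euler with step 0.01 gives about 2.7048,
# reflecting the method's O(step_size) global error.
#
#     y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#     print(y[-1])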
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# NOTE: the original module defines one such dummy placeholder per
# sentencepiece-backed class (roughly thirty in total, e.g. one per
# sentencepiece tokenizer); the concrete class names were lost during
# extraction, so the corrected pattern is kept once here.
class SentencePieceDummyObject(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
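# Usage sketch: this builder is what backs `load_dataset("csv", ...)`, e.g.
#
#     from datasets import load_dataset
#     dataset = load_dataset("csv", data_files={"train": "train.csv"})
#
# Any CsvConfig field above (sep, quotechar, skiprows, ...) can be passed as a
# keyword argument and is forwarded to pandas.read_csv in chunks of `chunksize`.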
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
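# Instantiation sketch: CANINE operates directly on Unicode code points, so the
# defaults above describe a character-level model (16384 positions, hash-bucketed
# character embeddings), e.g.
#
#     configuration = CanineConfig()      # google/canine-s style defaults
#     configuration.downsampling_rate     # -> 4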
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}


class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
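# Worked example: with the default conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1,
# 2, 1, 2, 1), the property above multiplies the strides to 5 * 2**6 = 320,
# i.e. one logit frame per 320 input samples (~20 ms of audio at 16 kHz).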
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )


@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
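# Shape sketch for the qkv split above: timm stores the fused projection as a
# single (3 * dim, dim) weight and (3 * dim,) bias, so slicing rows [:dim],
# [dim : 2 * dim] and [-dim:] recovers the separate query, key and value
# parameters that the Hugging Face Swin implementation expects.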
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swin_name',
default='swin_tiny_patch4_window7_224',
type=str,
help='Name of the Swin timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
'''simple docstring'''
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
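# Composition sketch: the composite config can be assembled from the two
# sub-configs defined above, e.g.
#
#     text_config = Pix2StructTextConfig()
#     vision_config = Pix2StructVisionConfig()
#     config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)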
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase : Optional[Any] = logging.get_logger(__name__)
lowercase : Union[str, Any] = {'vocab_file': 'spiece.model'}
lowercase : Tuple = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
lowercase : List[Any] = {
'albert-base-v1': 5_12,
'albert-large-v1': 5_12,
'albert-xlarge-v1': 5_12,
'albert-xxlarge-v1': 5_12,
'albert-base-v2': 5_12,
'albert-large-v2': 5_12,
'albert-xlarge-v2': 5_12,
'albert-xxlarge-v2': 5_12,
}
lowercase : Any = '▁'
class A ( __snake_case ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None:
"""simple docstring"""
A : Dict = (
AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE , normalized=SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else mask_token
)
A : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
A : List[Any] = do_lower_case
A : int = remove_space
A : str = keep_accents
A : Any = vocab_file
A : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE )
@property
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
return len(self.sp_model )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Tuple = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Tuple:
"""simple docstring"""
A : Dict = self.__dict__.copy()
A : Dict = None
return state
def __setstate__( self , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
A : Dict = {}
A : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ) -> int:
        """simple docstring"""
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''' , outputs )
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text ) -> List[str]:
        """simple docstring"""
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , '''''' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ) -> Optional[int]:
        """simple docstring"""
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ) -> Union[str, Any]:
        """simple docstring"""
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ) -> List[str]:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
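# Illustrative layout check (not part of the original file), assuming the stock
# ALBERT vocabulary ids cls=2 and sep=3: for token_ids_0 = [10, 11] and
# token_ids_1 = [20], the three sequence helpers above produce
#   build_inputs_with_special_tokens     -> [2, 10, 11, 3, 20, 3]   # [CLS] A [SEP] B [SEP]
#   get_special_tokens_mask              -> [1, 0, 0, 1, 0, 1]
#   create_token_type_ids_from_sequences -> [0, 0, 0, 0, 1, 1]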
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase_ ( n ):
    '''simple docstring'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
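# Illustrative check of the factorization helper above (not in the original
# file): 360 == 2**3 * 3**2 * 5 and factors come back in non-decreasing order.
if __name__ == "__main__":
    assert lowerCAmelCase_(360 ) == [2, 2, 2, 3, 3, 5]
    assert lowerCAmelCase_(97 ) == [97]  # a prime is its own only factor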
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class A :
    def __init__( self , short_edge_length , max_size=sys.maxsize ) -> Union[str, Any]:
        """simple docstring"""
        self.interp_method = '''bilinear'''
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__( self , imgs ) -> Tuple:
        """simple docstring"""
        img_augs = []
        for img in imgs:
            h , w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
            if size == 0:
                return img
            scale = size * 1.0 / min(h , w )
            if h < w:
                newh , neww = size, scale * w
            else:
                newh , neww = scale * h, size
            if max(newh , neww ) > self.max_size:
                scale = self.max_size * 1.0 / max(newh , neww )
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5 )
            newh = int(newh + 0.5 )
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img )
                pil_image = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
                img = np.asarray(pil_image )
            else:
                img = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img , (newh, neww) , mode=self.interp_method , align_corners=False ).squeeze(0 )
            img_augs.append(img )
        return img_augs
class A :
def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
A : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
A : str = cfg.INPUT.FORMAT
A : int = cfg.SIZE_DIVISIBILITY
A : Optional[int] = cfg.PAD_VALUE
A : Dict = cfg.INPUT.MAX_SIZE_TEST
A : Optional[Any] = cfg.MODEL.DEVICE
A : Dict = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
A : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
A : str = lambda SCREAMING_SNAKE_CASE : (x - self.pixel_mean) / self.pixel_std
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
A : Union[str, Any] = tuple(max(SCREAMING_SNAKE_CASE ) for s in zip(*[img.shape for img in images] ) )
A : List[str] = [im.shape[-2:] for im in images]
A : Optional[Any] = [
nn.functional.pad(
SCREAMING_SNAKE_CASE , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
]
return torch.stack(SCREAMING_SNAKE_CASE ), torch.tensor(SCREAMING_SNAKE_CASE )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
"""simple docstring"""
with torch.no_grad():
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : str = [images]
if single_image:
assert len(SCREAMING_SNAKE_CASE ) == 1
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(SCREAMING_SNAKE_CASE , images.pop(SCREAMING_SNAKE_CASE ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
SCREAMING_SNAKE_CASE , torch.as_tensor(img_tensorize(images.pop(SCREAMING_SNAKE_CASE ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
A : Tuple = torch.tensor([im.shape[:2] for im in images] )
A : Dict = self.aug(SCREAMING_SNAKE_CASE )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
A : Tuple = [self.normalizer(SCREAMING_SNAKE_CASE ) for x in images]
# now pad them to do the following operations
A, A : Optional[int] = self.pad(SCREAMING_SNAKE_CASE )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
A : Tuple = torch.true_divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def lowerCAmelCase_ ( boxes , scale_yx ):
    '''simple docstring'''
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def lowerCAmelCase_ ( tensor , box_size ):
    '''simple docstring'''
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h , w = box_size
    tensor[:, 0].clamp_(min=0 , max=w )
    tensor[:, 1].clamp_(min=0 , max=h )
    tensor[:, 2].clamp_(min=0 , max=w )
    tensor[:, 3].clamp_(min=0 , max=h )
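# Illustrative round trip for the two helpers above (not in the original file):
# boxes are (x0, y0, x1, y1); the first helper multiplies x coordinates by
# scale_x = scale_yx[:, 1] and y coordinates by scale_y = scale_yx[:, 0], the
# second clips the result in place to a (height, width) canvas.
if __name__ == "__main__":
    demo_boxes = torch.tensor([[-4.0, 2.0, 130.0, 90.0]] )
    demo_boxes[:, 0::2] *= 0.5  # x0, x1
    demo_boxes[:, 1::2] *= 0.5  # y0, y1
    demo_boxes.clamp_(min=0 , max=64 )  # keep everything on a 64x64 canvas
    assert demo_boxes.tolist() == [[0.0, 1.0, 64.0, 45.0]]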
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd( n ):
    '''simple docstring'''
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(''' ''' , end='''''' )
        for _ in range(0 , i + 1 ):  # printing stars
            print('''* ''' , end='''''' )
        print()
def reverse_floyd( n ):
    '''simple docstring'''
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars
            print('''* ''' , end='''''' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(''' ''' , end='''''' )
def pretty_print( n ):
    '''simple docstring'''
    if n <= 0:
        print(''' ... .... nothing printing :(''' )
        return
    floyd(n )  # upper half
    reverse_floyd(n )  # lower half
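# What pretty_print(3) draws (illustrative): floyd() prints the top half and
# reverse_floyd() mirrors it, giving a diamond of '* ' cells:
#     *
#    * *
#   * * *
#   * * *
#    * *
#     *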
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
    K = 1
    while K:
        user_number = int(input('enter the number and , and see the magic : '))
        print()
        pretty_print(user_number)
        K = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...')
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
lowercase : Dict = logging.get_logger(__name__)
# General docstring
lowercase : str = 'MobileNetV1Config'
# Base docstring
lowercase : str = 'google/mobilenet_v1_1.0_224'
lowercase : List[Any] = [1, 10_24, 7, 7]
# Image classification docstring
lowercase : Tuple = 'google/mobilenet_v1_1.0_224'
lowercase : Dict = 'tabby, tabby cat'
lowercase : Union[str, Any] = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=None ):
'''simple docstring'''
A : int = {}
if isinstance(snake_case__ , snake_case__ ):
A : Optional[Any] = model.mobilenet_va
else:
A : Optional[Any] = model
A : Dict = '''MobilenetV1/Conv2d_0/'''
A : List[Any] = backbone.conv_stem.convolution.weight
A : List[Any] = backbone.conv_stem.normalization.bias
A : int = backbone.conv_stem.normalization.weight
A : List[str] = backbone.conv_stem.normalization.running_mean
A : Union[str, Any] = backbone.conv_stem.normalization.running_var
for i in range(13 ):
A : List[str] = i + 1
A : Dict = i * 2
A : Any = backbone.layer[pt_index]
A : Dict = F'MobilenetV1/Conv2d_{tf_index}_depthwise/'
A : Union[str, Any] = pointer.convolution.weight
A : Union[str, Any] = pointer.normalization.bias
A : Any = pointer.normalization.weight
A : int = pointer.normalization.running_mean
A : int = pointer.normalization.running_var
A : Optional[Any] = backbone.layer[pt_index + 1]
A : Optional[int] = F'MobilenetV1/Conv2d_{tf_index}_pointwise/'
A : List[str] = pointer.convolution.weight
A : str = pointer.normalization.bias
A : Union[str, Any] = pointer.normalization.weight
A : Union[str, Any] = pointer.normalization.running_mean
A : Optional[Any] = pointer.normalization.running_var
if isinstance(snake_case__ , snake_case__ ):
A : Tuple = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
A : int = model.classifier.weight
A : Union[str, Any] = model.classifier.bias
return tf_to_pt_map
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
'''https://www.tensorflow.org/install/ for installation instructions.''' )
raise
# Load weights from TF model
A : str = tf.train.list_variables(snake_case__ )
A : List[str] = {}
for name, shape in init_vars:
logger.info(F'Loading TF weight {name} with shape {shape}' )
A : List[Any] = tf.train.load_variable(snake_case__ , snake_case__ )
A : Union[str, Any] = array
# Build TF to PyTorch weights loading map
A : Tuple = _build_tf_to_pytorch_map(snake_case__ , snake_case__ , snake_case__ )
for name, pointer in tf_to_pt_map.items():
logger.info(F'Importing {name}' )
if name not in tf_weights:
logger.info(F'{name} not in tf pre-trained weights, skipping' )
continue
A : Dict = tf_weights[name]
if "depthwise_weights" in name:
logger.info('''Transposing depthwise''' )
A : int = np.transpose(snake_case__ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('''Transposing''' )
if len(pointer.shape ) == 2: # copying into linear layer
A : Any = array.squeeze().transpose()
else:
A : List[str] = np.transpose(snake_case__ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' )
logger.info(F'Initialize PyTorch weight {name} {array.shape}' )
A : Optional[Any] = torch.from_numpy(snake_case__ )
tf_weights.pop(snake_case__ , snake_case__ )
tf_weights.pop(name + '''/RMSProp''' , snake_case__ )
tf_weights.pop(name + '''/RMSProp_1''' , snake_case__ )
tf_weights.pop(name + '''/ExponentialMovingAverage''' , snake_case__ )
logger.info(F'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' )
return model
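# Illustrative shape check (not in the original file) for the two transposes
# used above: TF checkpoints store conv kernels as (kh, kw, in, out) and
# depthwise kernels as (kh, kw, in, depth_multiplier); PyTorch expects
# (out, in, kh, kw).
if __name__ == "__main__":
    import numpy as np
    assert np.transpose(np.zeros((3, 3, 32, 64) ) , (3, 2, 0, 1) ).shape == (64, 32, 3, 3)
    assert np.transpose(np.zeros((3, 3, 32, 1) ) , (2, 3, 0, 1) ).shape == (32, 1, 3, 3)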
def apply_tf_padding( features , conv_layer ):
    '''simple docstring'''
    in_height , in_width = features.shape[-2:]
    stride_height , stride_width = conv_layer.stride
    kernel_height , kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , '''constant''' , 0.0 )
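# Worked example (illustrative) of the "SAME" padding rule above: for a
# 7-pixel edge with stride 2 and kernel 3, 7 % 2 == 1, so
# pad_along = max(3 - 1, 0) = 2, split as 1 before and 1 after; the conv then
# yields ceil(7 / 2) == 4 positions, matching TensorFlow's behaviour.
if __name__ == "__main__":
    padded = nn.functional.pad(torch.zeros(1 , 1 , 7 , 7 ) , (1, 1, 1, 1) , '''constant''' , 0.0 )
    assert padded.shape == (1, 1, 9, 9)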
class MobileNetVaConvLayer( nn.Module ):
    def __init__( self , config , in_channels , out_channels , kernel_size , stride = 1 , groups = 1 , bias = False , use_normalization = True , use_activation = True , ) -> None:
        """simple docstring"""
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(F'Input channels ({in_channels}) are not divisible by {groups} groups.' )
        if out_channels % groups != 0:
            raise ValueError(F'Output channels ({out_channels}) are not divisible by {groups} groups.' )
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
        self.convolution = nn.Conv2d(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , stride=stride , padding=padding , groups=groups , bias=bias , padding_mode='''zeros''' , )
        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels , eps=config.layer_norm_eps , momentum=0.9997 , affine=True , track_running_stats=True , )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation , str ):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act , str ):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None
    def forward( self , features ) -> torch.Tensor:
        """simple docstring"""
        if self.config.tf_padding:
            features = apply_tf_padding(features , self.convolution )
        features = self.convolution(features )
        if self.normalization is not None:
            features = self.normalization(features )
        if self.activation is not None:
            features = self.activation(features )
        return features
class A ( __snake_case ):
__magic_name__ = MobileNetVaConfig
__magic_name__ = load_tf_weights_in_mobilenet_va
__magic_name__ = '''mobilenet_v1'''
__magic_name__ = '''pixel_values'''
__magic_name__ = False
    def _init_weights( self , module ) -> None:
        """simple docstring"""
        if isinstance(module , (nn.Linear, nn.Conv2d) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.BatchNorm2d ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
lowercase : Union[str, Any] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowercase : str = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , __snake_case , )
class MobileNetVaModel( __snake_case ):
    def __init__( self , config , add_pooling_layer = True ) -> Optional[int]:
        """simple docstring"""
        super().__init__(config )
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
        self.conv_stem = MobileNetVaConvLayer(
            config , in_channels=config.num_channels , out_channels=out_channels , kernel_size=3 , stride=2 , )
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13 ):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=in_channels , kernel_size=3 , stride=strides[i] , groups=in_channels , ) )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=out_channels , kernel_size=1 , ) )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values = None , output_hidden_states = None , return_dict = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        """simple docstring"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('''You have to specify pixel_values''' )
        hidden_states = self.conv_stem(pixel_values )
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer ):
            hidden_states = layer_module(hidden_states )
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state ) , start_dim=1 )
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=all_hidden_states , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , __snake_case , )
class A ( __snake_case ):
    def __init__( self , config ) -> None:
        """simple docstring"""
        super().__init__(config )
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config )
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob , inplace=True )
        self.classifier = nn.Linear(last_hidden_size , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , output_hidden_states = None , labels = None , return_dict = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output ) )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = '''single_label_classification'''
                else:
                    self.config.problem_type = '''multi_label_classification'''
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , )
'''simple docstring'''
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class A ( __snake_case ):
    def __init__( self , unet , scheduler ) -> Dict:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size = 1 , generator = None , num_inference_steps = 50 , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        image = image.to(self.device )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output , t , image ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,), "This is a local test"
        return ImagePipelineOutput(images=image ), "This is a local test"
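# Hedged usage sketch (not part of the original file): any small unconditional
# UNet plus a scheduler will do; the sample size and step count here are
# placeholders chosen only to keep the demo fast.
if __name__ == "__main__":
    from diffusers import DDPMScheduler, UNet2DModel
    demo_unet = UNet2DModel(sample_size=32 , in_channels=3 , out_channels=3 )
    pipe = A(unet=demo_unet , scheduler=DDPMScheduler(num_train_timesteps=1000 ) )
    output, marker = pipe(batch_size=1 , num_inference_steps=2 )
    print(marker , output.images[0].size )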
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
lowercase : Tuple = logging.get_logger(__name__)
lowercase : Dict = 'T5Config'
def lowerCAmelCase_ ( input_ids , pad_token_id , decoder_start_token_id ):
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
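# Illustrative check (not in the original file) of the shift above: the decoder
# input gains the start token, drops the last label, and any -100 ignore-index
# that slides into the shifted row is replaced by the pad id.
if __name__ == "__main__":
    shifted = lowerCAmelCase_(jnp.array([[5, -100, -100]] ) , pad_token_id=0 , decoder_start_token_id=2 )
    assert shifted.tolist() == [[2, 5, 0]]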
class A ( __snake_case ):
__magic_name__ = '''mt5'''
__magic_name__ = MTaConfig
class A ( __snake_case ):
__magic_name__ = '''mt5'''
__magic_name__ = MTaConfig
class A ( __snake_case ):
__magic_name__ = '''mt5'''
__magic_name__ = MTaConfig
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , ) -> str:
"""simple docstring"""
A : Any = parent
A : List[Any] = batch_size
A : Union[str, Any] = seq_length
A : Any = is_training
A : int = use_input_mask
A : Union[str, Any] = vocab_size
A : List[Any] = hidden_size
A : List[Any] = num_hidden_layers
A : Optional[int] = num_attention_heads
A : str = intermediate_size
A : Tuple = hidden_act
A : Union[str, Any] = hidden_dropout_prob
A : Union[str, Any] = attention_probs_dropout_prob
A : int = max_position_embeddings
A : Optional[int] = initializer_range
A : Any = use_labels
A : Optional[int] = scope
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Optional[int] = None
if self.use_input_mask:
A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Dict = self.get_config()
return config, input_ids, input_mask, token_labels
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
A : List[str] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
A : int = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
"""simple docstring"""
A : List[str] = True
A : Union[str, Any] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , )
A : List[Any] = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = True
A : Tuple = True
A : Optional[int] = BertGenerationDecoder(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval()
# first forward pass
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , )
A : Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
A : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
A : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
A : Any = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
# select random slice
A : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
A : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
A : Optional[Any] = BertGenerationDecoder(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class A ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
__magic_name__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__magic_name__ = (BertGenerationDecoder,) if is_torch_available() else ()
__magic_name__ = (
{'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : List[str] = BertGenerationEncoderTester(self )
A : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = '''bert'''
        self.model_tester.create_and_check_model(config , input_ids , input_mask , token_labels )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Optional[Any] = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Tuple = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
A : Optional[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
A : Dict = model(SCREAMING_SNAKE_CASE )[0]
A : Optional[Any] = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
A : Dict = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Optional[Any] = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
A : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
A : Optional[Any] = model(SCREAMING_SNAKE_CASE )[0]
A : Optional[Any] = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
A : Any = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
'''simple docstring'''
import argparse
JS_FILE = 'docs/source/_static/js/custom.js'
def update_custom_js( version ):
    '''simple docstring'''
    with open(JS_FILE , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('''const stableVersion =''' ):
        index += 1
    lines[index] = F'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith('''const versionMapping = {''' ):
        index += 1
    # We go until the end
    while not lines[index].startswith('''}''' ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += F'    "v{version}": "v{version}",\n'
    with open(JS_FILE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lines )
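# Illustrative before/after (not from the original file) for a hypothetical
# release "4.30.0". The function rewrites one line and appends one entry:
#   const stableVersion = "v4.29.2"   ->   const stableVersion = "v4.30.0"
#   const versionMapping = { ...           (unchanged until the closing "}")
#       "v4.30.0": "v4.30.0",              <- appended as the new last entry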
if __name__ == "__main__":
lowercase : str = argparse.ArgumentParser()
parser.add_argument('--version', help='Release version.')
lowercase : str = parser.parse_args()
update_custom_js(args.version)
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid( _outputs ):
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax( _outputs ):
    '''simple docstring'''
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
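# Quick numeric check (illustrative) of the numerically stable softmax above:
# subtracting the row max leaves the result unchanged while avoiding the
# overflow a naive np.exp(1000.0) would hit.
if __name__ == "__main__":
    probs = softmax(np.array([[1000.0, 1001.0]] ) )
    assert np.allclose(probs.sum(axis=-1 ) , 1.0 )
    assert np.allclose(probs , [[0.26894142, 0.73105858]] )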
class ClassificationFunction( __snake_case ):
__magic_name__ = '''sigmoid'''
__magic_name__ = '''softmax'''
__magic_name__ = '''none'''
@add_end_docstrings(
__snake_case , R'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class A ( __snake_case ):
__magic_name__ = False
__magic_name__ = ClassificationFunction.NONE
def __init__( self , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , return_all_scores=None , function_to_apply=None , top_k="" , **tokenizer_kwargs ) -> Dict:
        """simple docstring"""
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k , int ) or top_k is None:
            postprocess_params['''top_k'''] = top_k
            postprocess_params['''_legacy'''] = False
        elif return_all_scores is not None:
            warnings.warn(
                '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'''
                ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , UserWarning , )
            if return_all_scores:
                postprocess_params['''top_k'''] = None
            else:
                postprocess_params['''top_k'''] = 1
        if isinstance(function_to_apply , str ):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params['''function_to_apply'''] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__( self , *args , **kwargs ) -> Tuple:
        """simple docstring"""
        result = super().__call__(*args , **kwargs )
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = '''top_k''' not in kwargs
        if isinstance(args[0] , str ) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess( self , inputs , **tokenizer_kwargs ) -> Dict[str, GenericTensor]:
        """simple docstring"""
        return_tensors = self.framework
        if isinstance(inputs , dict ):
            return self.tokenizer(**inputs , return_tensors=return_tensors , **tokenizer_kwargs )
        elif isinstance(inputs , list ) and len(inputs ) == 1 and isinstance(inputs[0] , list ) and len(inputs[0] ) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=return_tensors , **tokenizer_kwargs )
        elif isinstance(inputs , list ):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
                ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' )
        return self.tokenizer(inputs , return_tensors=return_tensors , **tokenizer_kwargs )
    def _forward( self , model_inputs ) -> Union[str, Any]:
        """simple docstring"""
        return self.model(**model_inputs )
    def postprocess( self , model_outputs , function_to_apply=None , top_k=1 , _legacy=True ) -> List[str]:
        """simple docstring"""
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs['''logits'''][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs )
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' )
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {'''label''': self.model.config.id2label[i], '''score''': score.item()} for i, score in enumerate(scores )
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] , reverse=True )
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
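# Hedged usage sketch (not part of the original file): this class backs the
# standard text-classification pipeline, so the usual entry point applies; the
# checkpoint here is just the stock SST-2 example and any sequence
# classification model would do.
if __name__ == "__main__":
    from transformers import pipeline
    clf = pipeline('''text-classification''' , model='''distilbert-base-uncased-finetuned-sst-2-english''' )
    print(clf('''This movie was great!''' , top_k=None ) )  # every label with its score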
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data( product = "laptop" ):
'''simple docstring'''
    url = F'https://www.amazon.in/laptop/s?k={product}'
    header = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
        '''Accept-Language''': '''en-US, en;q=0.5''',
    }
    soup = BeautifulSoup(requests.get(url , headers=header ).text )
# Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
            product_title = item.h2.text
            product_link = '''https://www.amazon.in/''' + item.h2.a['''href''']
            product_price = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
            try:
                product_rating = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
            except AttributeError:
                product_rating = '''Not available'''
            try:
                product_mrp = (
                    '''₹'''
                    + item.find(
                        '''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
                )
            except AttributeError:
                product_mrp = ''''''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
                            - float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
                        )
                        / float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
                    )
                    * 100 )
            except ValueError:
                discount = float('''nan''' )
except AttributeError:
pass
        data_frame.loc[len(data_frame.index )] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
        product_price = ''' '''
        product_mrp = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
lowercase : Union[str, Any] = 'headphones'
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
'''simple docstring'''
import cv2 as cva  # OpenCV; aliased so the cva.* calls below resolve
import numpy as np
class HarrisCorner:
    def __init__( self , k , window_size ) -> str:
        """simple docstring"""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('''invalid k value''' )
    def __str__( self ) -> str:
        """simple docstring"""
        return str(self.k )
    def detect( self , img_path ) -> tuple[cva.Mat, list[list[int]]]:
        """simple docstring"""
        img = cva.imread(img_path , 0 )
        h , w = img.shape
        corner_list : list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , _ = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
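    # Hedged alternative (illustrative): instead of a path on disk, write a
    # small synthetic test image first, e.g.
    #   demo = np.zeros((64, 64), dtype=np.uint8); demo[16:48, 16:48] = 255
    #   cva.imwrite('square.png', demo)
    #   color_img, corner_list = HarrisCorner(0.04, 3).detect('square.png')
    # A bright square on black gives strong responses at its four corners.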
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance( x , y , max_step ):
    '''simple docstring'''
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
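# Worked example (illustrative) for get_distance above: the origin never
# escapes, so the loop runs to completion and the distance is exactly 1, while
# a point far outside the set escapes on step 0 and the distance is 0.
if __name__ == "__main__":
    assert get_distance(0 , 0 , 50 ) == 1
    assert get_distance(3 , 3 , 50 ) == 0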
def get_black_and_white_rgb( distance ):
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb( distance ):
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image( image_width = 800 , image_height = 600 , figure_center_x = -0.6 , figure_center_y = 0 , figure_width = 3.2 , max_step = 50 , use_distance_color_coding = True , ):
    '''simple docstring'''
    img = Image.new('''RGB''' , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowercase : Optional[Any] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
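# --- Added sketch (not part of the original file) ---
# The same escape-time iteration as get_distance, written with Python's
# built-in complex type; handy for spot-checking a single point.
def _escape_steps(c: complex, max_step: int = 50) -> int:
    z = c
    for step in range(max_step):
        z = z * z + c
        if abs(z) > 2:  # equivalent to a*a + b*b > 4 above
            break
    return step

# e.g. _escape_steps(0j) == 49 (the origin never escapes), while
# _escape_steps(1 + 1j) escapes almost immediately.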
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers, mirroring fairseq's Dictionary."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self) -> int:
        """Returns the number of symbols in the dictionary."""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load the dictionary from a text file of '<symbol> <count>' lines."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Adds entries from a text file or an open file handle."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")


def rewrite_dict_keys(d):
    # (1) remove the BPE continuation marker '@@', (2) add the word-ending
    # symbol '</w>' where the word is not broken up, (3) restore special tokens
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
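# --- Added sketch (not part of the original script) ---
# What rewrite_dict_keys does to a toy fairseq vocabulary; the BPE token
# strings below are made up for illustration.
def _rewrite_dict_keys_demo() -> None:
    toy = {"bio@@": 4, "gpt": 5, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    assert rewrite_dict_keys(toy) == {
        "bio": 4,  # "@@" continuation marker stripped
        "gpt</w>": 5,  # word-final token gains "</w>"
        "<s>": 0,  # special tokens restored untouched
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }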
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def test_custom_files_are_present(transformers_path):
    """Return True only when every custom extension file ships with the package."""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
    args = parser.parse_args()
if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
else:
        transformers_path = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError('The built release does not contain the custom files. Fix this before going further!')
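# --- Added usage note (not part of the original script; the file name is an
# assumption, this utility ships under utils/ in the transformers repo) ---
# Run from the repo root, either against a built release under build/lib:
#   python utils/check_build.py
# or against the importable (installed or in-repo) package:
#   python utils/check_build.py --check_lib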
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=[1, 16, 4, 4] , SCREAMING_SNAKE_CASE=None , ) -> Union[str, Any]:
"""simple docstring"""
A : Any = parent
A : str = batch_size
A : Optional[Any] = image_size
A : Dict = patch_size
A : List[str] = num_channels
A : Optional[int] = is_training
A : List[str] = use_labels
A : Tuple = hidden_size
A : Optional[int] = num_hidden_layers
A : Optional[Any] = num_attention_heads
A : int = intermediate_size
A : Union[str, Any] = hidden_act
A : Dict = hidden_dropout_prob
A : Union[str, Any] = attention_probs_dropout_prob
A : Dict = type_sequence_label_size
A : List[Any] = initializer_range
A : int = scope
A : Dict = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
A : Tuple = (self.image_size // 32) ** 2
A : Union[str, Any] = num_patches + 1
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Optional[int] = None
if self.use_labels:
A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Union[str, Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=SCREAMING_SNAKE_CASE , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
A : Optional[int] = ViTHybridModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Union[str, Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
A : Optional[int] = self.type_sequence_label_size
A : Dict = ViTHybridForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Dict = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Union[str, Any] = self.prepare_config_and_inputs()
A, A, A : Tuple = config_and_inputs
A : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
__magic_name__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
__magic_name__ = (
{'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Optional[int] = ViTHybridModelTester(self )
A : str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
pass
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A, A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : List[Any] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A, A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : int = model_class(SCREAMING_SNAKE_CASE )
A : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Tuple = [*signature.parameters.keys()]
A : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A, A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A : Union[str, Any] = _config_zero_init(SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
A : Union[str, Any] = model_class(config=SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
A : List[Any] = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Optional[int] = ViTHybridModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
@cached_property
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Optional[int] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
SCREAMING_SNAKE_CASE )
A : List[str] = self.default_image_processor
A : Any = prepare_img()
A : List[str] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
A : List[str] = model(**SCREAMING_SNAKE_CASE )
# verify the logits
A : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
A : Dict = torch.tensor([-1.9_090, -0.4_993, -0.2_389] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@slow
@require_accelerate
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Optional[int] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
A : Union[str, Any] = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' )
A : Any = prepare_img()
A : List[Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
A : Optional[int] = model(**SCREAMING_SNAKE_CASE )
A : List[Any] = outputs.logits
# model predicts one of the 1000 ImageNet classes
A : Any = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=2 , ) -> List[str]:
"""simple docstring"""
A : List[str] = parent
A : Optional[Any] = batch_size
A : Tuple = image_size
A : int = patch_size
A : Optional[int] = num_channels
A : str = is_training
A : List[Any] = use_labels
A : Any = hidden_size
A : Any = num_hidden_layers
A : Optional[int] = num_attention_heads
A : Any = intermediate_size
A : List[str] = hidden_act
A : str = hidden_dropout_prob
A : Tuple = attention_probs_dropout_prob
A : Any = type_sequence_label_size
A : Optional[int] = initializer_range
A : Dict = scope
A : Tuple = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
A : List[Any] = (image_size // patch_size) ** 2
A : Tuple = num_patches + 2
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Tuple = None
if self.use_labels:
A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
A : Any = TFDeiTModel(config=SCREAMING_SNAKE_CASE )
A : str = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
A : Tuple = TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE )
A : List[Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A : Optional[int] = 1
A : str = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE )
A : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Tuple = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
A : str = self.type_sequence_label_size
A : Optional[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE )
A : Optional[Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A : Optional[Any] = 1
A : List[str] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE )
A : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Optional[int] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : Optional[int] = self.prepare_config_and_inputs()
A, A, A : Tuple = config_and_inputs
A : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
__magic_name__ = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__magic_name__ = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Tuple = TFDeiTModelTester(self )
A : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
pass
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A, A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Any = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
A : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Dense ) )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A, A : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Any = model_class(SCREAMING_SNAKE_CASE )
A : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Union[str, Any] = [*signature.parameters.keys()]
A : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Tuple:
"""simple docstring"""
A : Union[str, Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : List[str] = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
@cached_property
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Union[str, Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
A : Dict = self.default_image_processor
A : List[str] = prepare_img()
A : Any = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
# forward pass
A : Optional[int] = model(**SCREAMING_SNAKE_CASE )
# verify the logits
A : List[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
A : str = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
"""Binary search helpers: bisection bounds, insertion, and three search variants."""
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the first slot at which `item` could be inserted keeping order."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the slot just after the last occurrence of `item`."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; returns the index of `item` or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search delegating to the standard-library bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
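# --- Added sketch (not part of the original file) ---
# How the two bisection bounds differ on duplicates: bisect_left returns the
# first slot holding an equal value, bisect_right the slot after the last one,
# so their difference counts the occurrences. Sample data is made up.
def _bisect_demo() -> None:
    data = [1, 2, 4, 4, 4, 7]
    assert bisect_left(data, 4) == 2
    assert bisect_right(data, 4) == 5
    assert bisect_right(data, 4) - bisect_left(data, 4) == data.count(4)
    assert binary_search(data, 7) == 5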
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
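# --- Added sketch (not part of the original test; single-process emulation) ---
# What the checks above expect from pad_across_processes: tensors are padded
# with zeros along dim 0 up to the longest length across ranks, either at the
# end or, with pad_first=True, at the front.
def _pad_demo() -> None:
    t = torch.ones((2, 10))
    padded = torch.cat([t, torch.zeros((1, 10))], dim=0)  # pad_first=False
    padded_first = torch.cat([torch.zeros((1, 10)), t], dim=0)  # pad_first=True
    assert padded.shape == padded_first.shape == (3, 10)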
"""Greedy best-first search on a 2D grid (pathfinding demo)."""
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Heuristic: Manhattan distance from the current position to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    """Best-first search that always expands the open node with the smallest f_cost."""

    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the valid (in-bounds, unblocked) neighbours of a node."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from the given node back to the start."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
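# --- Added sketch (not part of the original file) ---
# The search above is driven purely by the Manhattan-distance heuristic
# |x - goal_x| + |y - goal_y| stored as f_cost on each node.
def _heuristic_demo() -> None:
    node = Node(0, 0, len(grid[0]) - 1, len(grid) - 1, 0, None)
    assert node.calculate_heuristic() == 12  # 6 + 6 on the 7x7 grid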
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
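# --- Added usage note (not part of the original script; the script name and
# the model checkpoints below are examples, not prescribed values) ---
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2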
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text between `start_prompt` and `end_prompt`, plus its location."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """Return the markdown list of models that support a given task guide."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally fix) the autogenerated model list in a task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
"""Count the Sundays that fell on the first of the month during the
twentieth century (1 Jan 1901 to 31 Dec 2000)."""


def solution() -> int:
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 Jan 1901 was the first Sunday of the century
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
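# --- Added cross-check (not part of the original solution) ---
# The same count obtained directly from the standard library's calendar
# arithmetic; both functions return 171.
def _solution_datetime() -> int:
    import datetime

    return sum(
        1
        for year in range(1901, 2001)
        for month in range(1, 13)
        if datetime.date(year, month, 1).weekday() == 6  # Sunday
    )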
"""Generate all permutations of a list with Heap's algorithm."""


def heaps(arr: list) -> list:
    """Return all permutations of `arr` as a list of tuples."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
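# --- Added cross-check (not part of the original file) ---
# Heap's algorithm must produce exactly n! permutations, each one unique; a
# quick comparison against itertools for a small made-up input.
def _heaps_check() -> None:
    from itertools import permutations
    from math import factorial

    sample = [1, 2, 3, 4]
    result = heaps(sample[:])  # pass a copy, heaps mutates its argument
    assert len(result) == factorial(len(sample))
    assert set(result) == set(permutations(sample))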
"""Nth ugly number: positive integers whose only prime factors are 2, 3 and 5."""


def ugly_numbers(n: int) -> int:
    """Return the nth ugly number, built by merging the 2-, 3- and 5-multiples."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
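# --- Added cross-check (not part of the original file) ---
# A slow trial-division check of the fast merge above: a number is "ugly"
# when repeatedly dividing out 2, 3 and 5 leaves 1.
def _ugly_numbers_naive(n: int) -> int:
    found, candidate = 0, 0
    while found < n:
        candidate += 1
        m = candidate
        for p in (2, 3, 5):
            while m % p == 0:
                m //= p
        if m == 1:
            found += 1
    return candidate

# e.g. _ugly_numbers_naive(10) == ugly_numbers(10) == 12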
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
__magic_name__ = (UniPCMultistepScheduler,)
__magic_name__ = (('''num_inference_steps''', 25),)
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : str = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
A : List[Any] = dict(self.forward_default_kwargs )
A : Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
A : Optional[Any] = self.dummy_sample
A : int = 0.1 * sample
A : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
A : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
A : List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
A : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
A, A : Tuple = sample, sample
for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
A : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
A : Optional[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : Optional[Any] = dict(self.forward_default_kwargs )
A : Tuple = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
A : List[Any] = self.dummy_sample
A : int = 0.1 * sample
A : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A : Optional[int] = self.get_scheduler_config()
A : Any = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
A : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
A : int = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
A : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
A : List[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
if scheduler is None:
A : Dict = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
A : Tuple = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE )
A : int = 10
A : Tuple = self.dummy_model()
A : Any = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
A : int = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
return sample
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Tuple = dict(self.forward_default_kwargs )
A : List[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
A : Dict = self.get_scheduler_config()
A : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
A : Optional[Any] = self.dummy_sample
A : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
A : Tuple = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
A : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
A : List[Any] = scheduler.timesteps[5]
A : Dict = scheduler.timesteps[6]
A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )
A : List[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
A : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
A : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
A : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config )
A : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
A : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
A : Optional[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
A : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , )
A : Dict = self.full_loop(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , )
assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples contain NaN values"
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : int = self.full_loop()
A : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : List[Any] = self.full_loop(prediction_type='''v_prediction''' )
A : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.1_014 ) < 1e-3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Dict = self.scheduler_classes[0]
A : List[Any] = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE )
A : Tuple = 10
A : Union[str, Any] = self.dummy_model()
A : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
A : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
assert sample.dtype == torch.float16
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
for scheduler_class in self.scheduler_classes:
A : Dict = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 311
| 1
|
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A ( __snake_case ):
__magic_name__ = (UniPCMultistepScheduler,)
__magic_name__ = (('''num_inference_steps''', 25),)
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : str = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
A : List[Any] = dict(self.forward_default_kwargs )
A : Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
A : Optional[Any] = self.dummy_sample
A : int = 0.1 * sample
A : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
A : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
A : List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
A : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
A, A : Tuple = sample, sample
for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
A : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
A : Optional[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : Optional[Any] = dict(self.forward_default_kwargs )
A : Tuple = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
A : List[Any] = self.dummy_sample
A : int = 0.1 * sample
A : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A : Optional[int] = self.get_scheduler_config()
A : Any = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
A : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
A : int = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
A : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
A : List[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
if scheduler is None:
A : Dict = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
A : Tuple = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE )
A : int = 10
A : Tuple = self.dummy_model()
A : Any = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
A : int = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
return sample
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Tuple = dict(self.forward_default_kwargs )
A : List[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
A : Dict = self.get_scheduler_config()
A : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
A : Optional[Any] = self.dummy_sample
A : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
A : Tuple = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
A : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
A : List[Any] = scheduler.timesteps[5]
A : Dict = scheduler.timesteps[6]
A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )
A : List[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
A : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
A : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
A : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config )
A : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
A : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
A : Optional[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
A : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , )
A : Dict = self.full_loop(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , )
assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples contain NaN values"
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : int = self.full_loop()
A : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : List[Any] = self.full_loop(prediction_type='''v_prediction''' )
A : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.1_014 ) < 1e-3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Dict = self.scheduler_classes[0]
A : List[Any] = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE )
A : Tuple = 10
A : Union[str, Any] = self.dummy_model()
A : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
A : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
assert sample.dtype == torch.float16
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
for scheduler_class in self.scheduler_classes:
A : Dict = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 311
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
A : Dict = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
if isinstance(self.unet.config.sample_size , SCREAMING_SNAKE_CASE ):
A : List[Any] = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
A : Optional[int] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
A : str = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
A : Any = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in the DDIM paper and should be in [0, 1]
# do x_t -> x_t-1
A : int = self.scheduler.step(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , use_clipped_model_output=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
A : Dict = (image / 2 + 0.5).clamp(0 , 1 )
A : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A : int = self.numpy_to_pil(SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE )
| 311
| 1
|
'''simple docstring'''
from __future__ import annotations
lowercase : Union[str, Any] = list[tuple[int, int]]
lowercase : Optional[Any] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0's are free paths whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowercase : Any = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[Any]:
"""simple docstring"""
A : int = pos_x
A : Optional[Any] = pos_y
A : Optional[Any] = (pos_y, pos_x)
A : str = goal_x
A : Optional[int] = goal_y
A : List[Any] = g_cost
A : str = parent
A : str = self.calculate_heuristic()
def __lowerCAmelCase ( self ) -> float:
"""simple docstring"""
A : Optional[int] = abs(self.pos_x - self.goal_x )
A : Optional[Any] = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
return self.f_cost < other.f_cost
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE )
A : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , SCREAMING_SNAKE_CASE )
A : Optional[Any] = [self.start]
A : list[Node] = []
A : Tuple = False
def __lowerCAmelCase ( self ) -> Path | None:
"""simple docstring"""
while self.open_nodes:
# Open nodes are kept sorted by f_cost via __lt__
self.open_nodes.sort()
A : Optional[int] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
A : Optional[int] = True
return self.retrace_path(SCREAMING_SNAKE_CASE )
self.closed_nodes.append(SCREAMING_SNAKE_CASE )
A : Any = self.get_successors(SCREAMING_SNAKE_CASE )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(SCREAMING_SNAKE_CASE )
else:
# retrieve the best current path
A : str = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(SCREAMING_SNAKE_CASE )
else:
self.open_nodes.append(SCREAMING_SNAKE_CASE )
if not self.reached:
return [self.start.pos]
return None
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[Node]:
"""simple docstring"""
A : List[Any] = []
for action in delta:
A : List[str] = parent.pos_x + action[1]
A : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , SCREAMING_SNAKE_CASE , ) )
return successors
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Path:
"""simple docstring"""
A : int = node
A : Union[str, Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
A : int = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowercase : Tuple = (0, 0)
lowercase : List[str] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('------')
lowercase : int = GreedyBestFirst(init, goal)
lowercase : Union[str, Any] = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowercase : Dict = 2
for elem in grid:
print(elem)
| 311
|
'''simple docstring'''
from __future__ import annotations
from random import random
class A :
def __init__( self , SCREAMING_SNAKE_CASE = None ) -> Tuple:
"""simple docstring"""
A : Optional[Any] = value
A : Any = random()
A : Node | None = None
A : Node | None = None
def __repr__( self ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return F'\'{self.value}: {self.prior:.5}\''
else:
return pformat(
{F'{self.value}: {self.prior:.5}': (self.left, self.right)} , indent=1 )
def __str__( self ) -> str:
"""simple docstring"""
A : Optional[Any] = str(self.value ) + ''' '''
A : Union[str, Any] = str(self.left or '''''' )
A : Any = str(self.right or '''''' )
return value + left + right
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
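# Split the treap into (left, right) so that left holds values <= value and right holds values > value, preserving heap order on prior.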
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
A, A : Any = split(root.left , snake_case__ )
return left, root
else:
A, A : Optional[int] = split(root.right , snake_case__ )
return root, right
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
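# Merge two treaps where every value in left precedes every value in right; the node with the smaller random priority becomes the new root.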
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
A : List[str] = merge(left.right , snake_case__ )
return left
else:
A : Tuple = merge(snake_case__ , right.left )
return right
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
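# Insert: split the treap at value, then merge left subtree + new node + right subtree.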
A : List[Any] = Node(snake_case__ )
A, A : Tuple = split(snake_case__ , snake_case__ )
return merge(merge(snake_case__ , snake_case__ ) , snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
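# Erase all nodes equal to value: split off everything <= value - 1, split out the nodes == value, and merge the two remainders.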
A, A : Dict = split(snake_case__ , value - 1 )
A, A : Any = split(snake_case__ , snake_case__ )
return merge(snake_case__ , snake_case__ )
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=''',''' )
inorder(root.right )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
for arg in args.split():
if arg[0] == "+":
A : int = insert(snake_case__ , int(arg[1:] ) )
elif arg[0] == "-":
A : int = erase(snake_case__ , int(arg[1:] ) )
else:
print('''Unknown command''' )
return root
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : Union[str, Any] = None
print(
'''Enter numbers to create a tree; +value inserts value into the treap, '''
'''-value erases all nodes with that value. \'q\' to quit. ''' )
A : Optional[int] = input()
while args != "q":
A : str = interact_treap(snake_case__ , snake_case__ )
print(snake_case__ )
A : Union[str, Any] = input()
print('''goodbye!''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 311
| 1
|
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
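# Trial division: divide out each factor i while i * i <= n; whatever remains above 1 is itself prime.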
A : List[str] = 2
A : Dict = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(snake_case__ )
if n > 1:
factors.append(snake_case__ )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311
|
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=sys.maxsize ) -> Union[str, Any]:
"""simple docstring"""
A : Tuple = '''bilinear'''
A : Optional[int] = max_size
A : Dict = short_edge_length
def __call__( self , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : Tuple = []
for img in imgs:
A, A : str = img.shape[:2]
# later: provide list and randomly choose index for resize
A : Union[str, Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
A : int = size * 1.0 / min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if h < w:
A, A : Tuple = size, scale * w
else:
A, A : str = scale * h, size
if max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) > self.max_size:
A : List[str] = self.max_size * 1.0 / max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Tuple = newh * scale
A : int = neww * scale
A : List[str] = int(neww + 0.5 )
A : int = int(newh + 0.5 )
if img.dtype == np.uint8:
A : Dict = Image.fromarray(SCREAMING_SNAKE_CASE )
A : Optional[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
A : str = np.asarray(SCREAMING_SNAKE_CASE )
else:
A : Dict = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # hwc -> nchw
A : List[Any] = nn.functional.interpolate(
SCREAMING_SNAKE_CASE , (newh, neww) , mode=self.interp_method , align_corners=SCREAMING_SNAKE_CASE ).squeeze(0 )
img_augs.append(SCREAMING_SNAKE_CASE )
return img_augs
class A :
def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
A : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
A : str = cfg.INPUT.FORMAT
A : int = cfg.SIZE_DIVISIBILITY
A : Optional[int] = cfg.PAD_VALUE
A : Dict = cfg.INPUT.MAX_SIZE_TEST
A : Optional[Any] = cfg.MODEL.DEVICE
A : Dict = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
A : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
A : str = lambda SCREAMING_SNAKE_CASE : (x - self.pixel_mean) / self.pixel_std
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
A : Union[str, Any] = tuple(max(SCREAMING_SNAKE_CASE ) for s in zip(*[img.shape for img in images] ) )
A : List[str] = [im.shape[-2:] for im in images]
A : Optional[Any] = [
nn.functional.pad(
SCREAMING_SNAKE_CASE , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
]
return torch.stack(SCREAMING_SNAKE_CASE ), torch.tensor(SCREAMING_SNAKE_CASE )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
"""simple docstring"""
with torch.no_grad():
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : str = [images]
if single_image:
assert len(SCREAMING_SNAKE_CASE ) == 1
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(SCREAMING_SNAKE_CASE , images.pop(SCREAMING_SNAKE_CASE ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
SCREAMING_SNAKE_CASE , torch.as_tensor(img_tensorize(images.pop(SCREAMING_SNAKE_CASE ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
A : Tuple = torch.tensor([im.shape[:2] for im in images] )
A : Dict = self.aug(SCREAMING_SNAKE_CASE )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
A : Tuple = [self.normalizer(SCREAMING_SNAKE_CASE ) for x in images]
# now pad them to do the following operations
A, A : Optional[int] = self.pad(SCREAMING_SNAKE_CASE )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
A : Tuple = torch.true_divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
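# Rescale box coordinates in place: even columns (x) by the x factor, odd columns (y) by the y factor.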
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
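# Clamp box corners into the image (x coordinates to [0, width], y coordinates to [0, height]).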
assert torch.isfinite(snake_case__ ).all(), "Box tensor contains infinite or NaN!"
A, A : str = box_size
tensor[:, 0].clamp_(min=0 , max=snake_case__ )
tensor[:, 1].clamp_(min=0 , max=snake_case__ )
tensor[:, 2].clamp_(min=0 , max=snake_case__ )
tensor[:, 3].clamp_(min=0 , max=snake_case__ )
| 311
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowercase : Tuple = logging.get_logger(__name__)
class A ( __snake_case ):
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
warnings.warn(
'''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use YolosImageProcessor instead.''' , SCREAMING_SNAKE_CASE , )
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 311
|
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
lowercase : Tuple = parser.parse_args()
lowercase : Union[str, Any] = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 311
| 1
|
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
debug_launcher(test_script.main )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
debug_launcher(test_ops.main )
| 311
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
lowercase : str = datasets.utils.logging.get_logger(__name__)
lowercase : Union[str, Any] = ['names', 'prefix']
lowercase : Union[str, Any] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
lowercase : List[Any] = ['encoding_errors', 'on_bad_lines']
lowercase : Any = ['date_format']
@dataclass
class A ( datasets.BuilderConfig ):
__magic_name__ = ","
__magic_name__ = None
__magic_name__ = "infer"
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
__magic_name__ = True
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
__magic_name__ = True
__magic_name__ = True
__magic_name__ = False
__magic_name__ = True
__magic_name__ = None
__magic_name__ = "."
__magic_name__ = None
__magic_name__ = '"'
__magic_name__ = 0
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
__magic_name__ = True
__magic_name__ = True
__magic_name__ = 0
__magic_name__ = True
__magic_name__ = False
__magic_name__ = None
__magic_name__ = 10000
__magic_name__ = None
__magic_name__ = "strict"
__magic_name__ = "error"
__magic_name__ = None
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
if self.delimiter is not None:
A : Optional[Any] = self.delimiter
if self.column_names is not None:
A : Optional[Any] = self.column_names
@property
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : str = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , SCREAMING_SNAKE_CASE ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class A ( datasets.ArrowBasedBuilder ):
__magic_name__ = CsvConfig
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
A : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(SCREAMING_SNAKE_CASE , (str, list, tuple) ):
A : str = data_files
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : int = [files]
A : Optional[int] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
A : Tuple = []
for split_name, files in data_files.items():
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : List[str] = [files]
A : List[str] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files]
splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE , gen_kwargs={'''files''': files} ) )
return splits
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> pa.Table:
"""simple docstring"""
if self.config.features is not None:
A : Optional[int] = self.config.features.arrow_schema
if all(not require_storage_cast(SCREAMING_SNAKE_CASE ) for feature in self.config.features.values() ):
# cheaper cast
A : List[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=SCREAMING_SNAKE_CASE )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
A : int = table_cast(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return pa_table
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
A : Union[str, Any] = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
A : int = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(SCREAMING_SNAKE_CASE ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE ) ):
A : Union[str, Any] = pd.read_csv(SCREAMING_SNAKE_CASE , iterator=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(SCREAMING_SNAKE_CASE ):
A : Dict = pa.Table.from_pandas(SCREAMING_SNAKE_CASE )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(SCREAMING_SNAKE_CASE )
except ValueError as e:
logger.error(F'Failed to read file \'{file}\' with error {type(SCREAMING_SNAKE_CASE )}: {e}' )
raise
| 311
| 1
|
'''simple docstring'''
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
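# Euclid's algorithm: gcd(x, y) = gcd(y, x mod y), terminating when y == 0.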
return x if y == 0 else greatest_common_divisor(snake_case__ , x % y )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
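# lcm(x, y) = (x * y) // gcd(x, y).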
return (x * y) // greatest_common_divisor(snake_case__ , snake_case__ )
def lowerCAmelCase_ ( snake_case__ = 20 ):
'''simple docstring'''
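# Project Euler 5: the smallest number evenly divisible by 1..n is the running lcm over that range.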
A : Any = 1
for i in range(1 , n + 1 ):
A : int = lcm(snake_case__ , snake_case__ )
return g
if __name__ == "__main__":
print(f'''{solution() = }''')
| 311
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : int = logging.get_logger(__name__)
lowercase : int = {
'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class A ( __snake_case ):
__magic_name__ = '''sew'''
def __init__( self , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE="group" , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , SCREAMING_SNAKE_CASE=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.05 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="mean" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=2 , **SCREAMING_SNAKE_CASE , ) -> Tuple:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE , pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE )
A : Optional[Any] = hidden_size
A : Any = feat_extract_norm
A : Optional[int] = feat_extract_activation
A : Tuple = list(SCREAMING_SNAKE_CASE )
A : List[str] = list(SCREAMING_SNAKE_CASE )
A : List[str] = list(SCREAMING_SNAKE_CASE )
A : int = conv_bias
A : List[Any] = num_conv_pos_embeddings
A : Tuple = num_conv_pos_embedding_groups
A : int = len(self.conv_dim )
A : Dict = num_hidden_layers
A : Optional[int] = intermediate_size
A : Any = squeeze_factor
A : int = hidden_act
A : str = num_attention_heads
A : Dict = hidden_dropout
A : Optional[Any] = attention_dropout
A : List[str] = activation_dropout
A : Union[str, Any] = feat_proj_dropout
A : Union[str, Any] = final_dropout
A : int = layerdrop
A : Optional[Any] = layer_norm_eps
A : Any = initializer_range
A : Tuple = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. '''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '''
F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A : Optional[Any] = apply_spec_augment
A : Optional[Any] = mask_time_prob
A : Union[str, Any] = mask_time_length
A : Optional[Any] = mask_time_min_masks
A : str = mask_feature_prob
A : Tuple = mask_feature_length
A : Any = mask_feature_min_masks
# ctc loss
A : List[Any] = ctc_loss_reduction
A : Dict = ctc_zero_infinity
# sequence classification
A : int = use_weighted_layer_sum
A : Optional[int] = classifier_proj_size
@property
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 311
| 1
|
'''simple docstring'''
def lowerCAmelCase_ ( snake_case__ = 1000 ):
'''simple docstring'''
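# Project Euler 16: sum the decimal digits of 2**power by repeatedly peeling off the last digit.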
A : Optional[int] = 2**power
A : str = 0
while n:
A, A : Tuple = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 311
|
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
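# Parse the timm model name (e.g. swin_tiny_patch4_window7_224) into model size, image size, and window size, then fill in embed_dim/depths/heads and the ImageNet label mapping.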
A : Dict = SwinConfig()
A : List[Any] = swin_name.split('''_''' )
A : Tuple = name_split[1]
A : Union[str, Any] = int(name_split[4] )
A : str = int(name_split[3][-1] )
if model_size == "tiny":
A : Optional[int] = 96
A : Optional[Any] = (2, 2, 6, 2)
A : Any = (3, 6, 12, 24)
elif model_size == "small":
A : Optional[int] = 96
A : str = (2, 2, 18, 2)
A : Tuple = (3, 6, 12, 24)
elif model_size == "base":
A : int = 128
A : Optional[Any] = (2, 2, 18, 2)
A : List[str] = (4, 8, 16, 32)
else:
A : Dict = 192
A : Optional[Any] = (2, 2, 18, 2)
A : Optional[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
A : Dict = 2_1841
else:
A : str = 1000
A : List[str] = '''huggingface/label-files'''
A : Any = '''imagenet-1k-id2label.json'''
A : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) )
A : str = {int(snake_case__ ): v for k, v in idalabel.items()}
A : Tuple = idalabel
A : Tuple = {v: k for k, v in idalabel.items()}
A : Tuple = img_size
A : Dict = num_classes
A : Optional[Any] = embed_dim
A : str = depths
A : str = num_heads
A : Optional[int] = window_size
return config
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
if "patch_embed.proj" in name:
A : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
A : Tuple = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
A : Optional[int] = '''encoder.''' + name
if "attn.proj" in name:
A : List[str] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
A : List[str] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
A : Any = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
A : Tuple = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
A : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
A : str = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "norm.weight":
A : Tuple = '''layernorm.weight'''
if name == "norm.bias":
A : Tuple = '''layernorm.bias'''
if "head" in name:
A : Any = name.replace('''head''' , '''classifier''' )
else:
A : List[Any] = '''swin.''' + name
return name
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
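# timm fuses q, k, v into a single "qkv" tensor; slice it into separate query/key/value weights and biases of width all_head_size.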
for key in orig_state_dict.copy().keys():
A : Dict = orig_state_dict.pop(snake_case__ )
if "mask" in key:
continue
elif "qkv" in key:
A : Dict = key.split('''.''' )
A : Optional[int] = int(key_split[1] )
A : List[str] = int(key_split[3] )
A : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A : Any = val[:dim, :]
A : Dict = val[dim : dim * 2, :]
A : List[str] = val[-dim:, :]
else:
A : Any = val[:dim]
A : Optional[int] = val[dim : dim * 2]
A : Any = val[-dim:]
else:
A : str = val
return orig_state_dict
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : Tuple = timm.create_model(snake_case__ , pretrained=snake_case__ )
timm_model.eval()
A : Optional[Any] = get_swin_config(snake_case__ )
A : Optional[int] = SwinForImageClassification(snake_case__ )
model.eval()
A : List[str] = convert_state_dict(timm_model.state_dict() , snake_case__ )
model.load_state_dict(snake_case__ )
A : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A : Any = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) )
A : List[Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
A : List[Any] = image_processor(images=snake_case__ , return_tensors='''pt''' )
A : Any = timm_model(inputs['''pixel_values'''] )
A : Optional[Any] = model(**snake_case__ ).logits
assert torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swin_name',
default='swin_tiny_patch4_window7_224',
type=str,
help='Name of the Swin timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowercase : int = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 311
| 1
|
'''simple docstring'''
import string
import numpy
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
return b if a == 0 else greatest_common_divisor(b % a , snake_case__ )
class A :
__magic_name__ = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
__magic_name__ = numpy.vectorize(lambda __snake_case : x % 36 )
__magic_name__ = numpy.vectorize(__snake_case )
def __init__( self , SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
A : List[Any] = self.modulus(SCREAMING_SNAKE_CASE ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
A : Optional[Any] = encrypt_key.shape[0]
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return self.key_string.index(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return self.key_string[round(SCREAMING_SNAKE_CASE )]
def __lowerCAmelCase ( self ) -> None:
"""simple docstring"""
A : Tuple = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
A : List[Any] = det % len(self.key_string )
A : List[str] = len(self.key_string )
if greatest_common_divisor(SCREAMING_SNAKE_CASE , len(self.key_string ) ) != 1:
A : Tuple = (
F'determinant modulo {req_l} of the encryption key ({det}) '
F'is not coprime w.r.t. {req_l}.\nTry another key.'
)
raise ValueError(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : List[Any] = [char for char in text.upper() if char in self.key_string]
A : str = chars[-1]
while len(SCREAMING_SNAKE_CASE ) % self.break_key != 0:
chars.append(SCREAMING_SNAKE_CASE )
return "".join(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : Dict = self.process_text(text.upper() )
A : Union[str, Any] = ''''''
for i in range(0 , len(SCREAMING_SNAKE_CASE ) - self.break_key + 1 , self.break_key ):
A : Any = text[i : i + self.break_key]
A : int = [self.replace_letters(SCREAMING_SNAKE_CASE ) for char in batch]
A : Tuple = numpy.array([vec] ).T
A : Optional[int] = self.modulus(self.encrypt_key.dot(SCREAMING_SNAKE_CASE ) ).T.tolist()[0]
A : str = ''''''.join(
self.replace_digits(SCREAMING_SNAKE_CASE ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def __lowerCAmelCase ( self ) -> numpy.ndarray:
"""simple docstring"""
A : List[Any] = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
A : Union[str, Any] = det % len(self.key_string )
A : int = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
A : int = i
break
A : Dict = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(SCREAMING_SNAKE_CASE ) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : int = self.make_decrypt_key()
A : Tuple = self.process_text(text.upper() )
A : Tuple = ''''''
for i in range(0 , len(SCREAMING_SNAKE_CASE ) - self.break_key + 1 , self.break_key ):
A : Any = text[i : i + self.break_key]
A : Dict = [self.replace_letters(SCREAMING_SNAKE_CASE ) for char in batch]
A : int = numpy.array([vec] ).T
A : List[Any] = self.modulus(decrypt_key.dot(SCREAMING_SNAKE_CASE ) ).T.tolist()[0]
A : Tuple = ''''''.join(
self.replace_digits(SCREAMING_SNAKE_CASE ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : Union[str, Any] = int(input('''Enter the order of the encryption key: ''' ) )
A : int = []
print('''Enter each row of the encryption key with space separated integers''' )
for _ in range(snake_case__ ):
A : List[str] = [int(snake_case__ ) for x in input().split()]
hill_matrix.append(snake_case__ )
A : str = HillCipher(numpy.array(snake_case__ ) )
print('''Would you like to encrypt or decrypt some text? (1 or 2)''' )
A : Tuple = input('''\n1. Encrypt\n2. Decrypt\n''' )
if option == "1":
A : List[str] = input('''What text would you like to encrypt?: ''' )
print('''Your encrypted text is:''' )
print(hc.encrypt(snake_case__ ) )
elif option == "2":
A : Optional[int] = input('''What text would you like to decrypt?: ''' )
print('''Your decrypted text is:''' )
print(hc.decrypt(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 311
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Optional[int] = logging.get_logger(__name__)
lowercase : Tuple = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class A ( __snake_case ):
__magic_name__ = '''pix2struct_text_model'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {
'''hidden_size''': '''hidden_size''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , SCREAMING_SNAKE_CASE=50244 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
A : str = vocab_size
A : List[str] = hidden_size
A : List[Any] = d_kv
A : Optional[Any] = d_ff
A : Dict = num_layers
A : Dict = num_heads
A : Optional[int] = relative_attention_num_buckets
A : Optional[Any] = relative_attention_max_distance
A : Dict = dropout_rate
A : Dict = layer_norm_epsilon
A : Tuple = initializer_factor
A : Union[str, Any] = use_cache
A : int = eos_token_id
A : List[str] = decoder_start_token_id
# for backwards compatibility
A : int = dense_act_fn
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , tie_word_embeddings=SCREAMING_SNAKE_CASE , is_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE )
A, A : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
A : Union[str, Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class A ( __snake_case ):
__magic_name__ = '''pix2struct_vision_model'''
def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=1e-10 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , **SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
A : List[str] = hidden_size
A : Optional[Any] = patch_embed_hidden_size
A : Union[str, Any] = d_ff
A : Dict = dropout_rate
A : str = num_hidden_layers
A : Dict = num_attention_heads
A : Tuple = initializer_range
A : List[str] = initializer_factor
A : Union[str, Any] = attention_dropout
A : Tuple = layer_norm_eps
A : int = dense_act_fn
A : Optional[int] = seq_len
A : Tuple = relative_attention_num_buckets
A : str = relative_attention_max_distance
A : Optional[Any] = d_kv
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE )
A, A : int = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
A : Optional[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class A ( __snake_case ):
__magic_name__ = '''pix2struct'''
__magic_name__ = True
def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
super().__init__(tie_word_embeddings=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if text_config is None:
A : Dict = {}
logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' )
if vision_config is None:
A : str = {}
logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' )
A : Dict = PixaStructTextConfig(**SCREAMING_SNAKE_CASE )
A : Any = PixaStructVisionConfig(**SCREAMING_SNAKE_CASE )
A : Any = self.text_config.decoder_start_token_id
A : Any = self.text_config.pad_token_id
A : Dict = self.text_config.eos_token_id
A : Union[str, Any] = initializer_factor
A : Tuple = initializer_range
A : Optional[Any] = self.initializer_range
A : int = self.initializer_range
A : Tuple = is_vqa
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Tuple = copy.deepcopy(self.__dict__ )
A : Dict = self.text_config.to_dict()
A : int = self.vision_config.to_dict()
A : Any = self.__class__.model_type
return output
| 311
| 1
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
A : Dict = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
if isinstance(self.unet.config.sample_size , SCREAMING_SNAKE_CASE ):
A : List[Any] = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
A : Optional[int] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
A : str = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
A : Any = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
A : int = self.scheduler.step(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , use_clipped_model_output=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
A : Dict = (image / 2 + 0.5).clamp(0 , 1 )
A : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A : int = self.numpy_to_pil(SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE )
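# --- illustrative usage sketch, not part of the original file ---
# How this pipeline is typically driven; the checkpoint id below is only an
# example, and the calls are left commented out because they download weights.
# from diffusers import DDIMPipeline
# pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
# images = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images
# images[0].save("ddim_sample.png")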
| 311
|
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : List[str] = 2
A : Dict = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(snake_case__ )
if n > 1:
factors.append(snake_case__ )
return factors
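# --- illustrative usage sketch, not part of the original file ---
# Worked example for the trial-division factorization above: 360 factors as
# 2 * 2 * 2 * 3 * 3 * 5, and multiplying the factors back recovers n.
# `prime_factors` is a hypothetical alias standing in for the obfuscated
# function name.
import math
def prime_factors(n):
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert math.prod(prime_factors(360)) == 360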
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311
| 1
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Optional[Any] = inspect.getfile(accelerate.test_utils )
A : int = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
A : Optional[Any] = test_metrics
@require_cpu
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
self.test_metrics.main()
@require_multi_gpu
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
print(F'Found {torch.cuda.device_count()} devices.' )
A : List[str] = ['''torchrun''', F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE , env=os.environ.copy() )
| 311
|
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
for i in range(0 , snake_case__ ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(''' ''' , end='''''' )
for _ in range(0 , i + 1 ): # printing stars
print('''* ''' , end='''''' )
print()
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
for i in range(snake_case__ , 0 , -1 ):
for _ in range(i , 0 , -1 ): # printing stars
print('''* ''' , end='''''' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(''' ''' , end='''''' )
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
if n <= 0:
print(''' ... .... nothing printing :(''' )
return
floyd(snake_case__ ) # upper half
reverse_floyd(snake_case__ ) # lower half
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
lowercase : List[str] = 1
while K:
lowercase : List[Any] = int(input('Enter the number and see the magic: '))
print()
pretty_print(user_number)
lowercase : Any = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...')
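# --- illustrative sketch, not part of the original file ---
# The same diamond can be built with string arithmetic; for n = 3 both
# approaches print:
#   *
#  * *
# * * *
# * * *
#  * *
#   *
# (`diamond_rows` is a hypothetical helper introduced only for this sketch.)
def diamond_rows(n):
    upper = [" " * (n - i - 1) + "* " * (i + 1) for i in range(n)]
    return upper + upper[::-1]
assert diamond_rows(1) == ["* ", "* "]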
| 311
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowercase : Union[str, Any] = logging.get_logger(__name__)
class A ( __snake_case ):
__magic_name__ = ['''pixel_values''']
def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
A : Dict = size if size is not None else {'''shortest_edge''': 256}
A : int = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
A : Optional[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
A : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
A : Optional[int] = do_resize
A : Union[str, Any] = size
A : str = resample
A : Optional[int] = do_center_crop
A : Optional[int] = crop_size
A : Dict = do_rescale
A : List[Any] = rescale_factor
A : str = do_normalize
A : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
A : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A : Optional[int] = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE )
return resize(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
A : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> np.ndarray:
"""simple docstring"""
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> int:
"""simple docstring"""
A : Dict = do_resize if do_resize is not None else self.do_resize
A : Tuple = size if size is not None else self.size
A : Dict = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
A : Dict = resample if resample is not None else self.resample
A : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
A : Optional[int] = crop_size if crop_size is not None else self.crop_size
A : str = get_size_dict(SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
A : Dict = do_rescale if do_rescale is not None else self.do_rescale
A : str = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Dict = do_normalize if do_normalize is not None else self.do_normalize
A : Optional[int] = image_mean if image_mean is not None else self.image_mean
A : Union[str, Any] = image_std if image_std is not None else self.image_std
A : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
A : List[Any] = [to_numpy_array(image ) for image in images]
if do_resize:
A : Dict = [self.resize(image=image , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
A : List[Any] = [self.center_crop(image=image , size=SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
A : List[Any] = [self.rescale(image=image , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=image , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images]
A : Optional[int] = [to_channel_dimension_format(image , SCREAMING_SNAKE_CASE ) for image in images]
A : int = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[str]:
"""simple docstring"""
A : List[str] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(SCREAMING_SNAKE_CASE ):
A : str = target_sizes.numpy()
A : Tuple = []
for idx in range(len(SCREAMING_SNAKE_CASE ) ):
A : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=SCREAMING_SNAKE_CASE )
A : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(SCREAMING_SNAKE_CASE )
else:
A : List[Any] = logits.argmax(dim=1 )
A : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 311
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , **SCREAMING_SNAKE_CASE , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
A : List[Any] = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=SCREAMING_SNAKE_CASE , )
A : Optional[Any] = image.to(self.device )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
A : Tuple = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
A : List[Any] = self.scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
A : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A : List[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE ), "This is a local test"
| 311
| 1
|
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
lowercase : Optional[int] = re.compile(R'([A-Z]+)([A-Z][a-z])')
lowercase : Optional[Any] = re.compile(R'([a-z\d])([A-Z])')
lowercase : int = re.compile(R'(?<!_)_(?!_)')
lowercase : str = re.compile(R'(_{2,})')
lowercase : Tuple = R'^\w+(\.\w+)*$'
lowercase : List[Any] = R'<>:/\|?*'
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : str = _uppercase_uppercase_re.sub(R'''\1_\2''' , snake_case__ )
A : Optional[int] = _lowercase_uppercase_re.sub(R'''\1_\2''' , snake_case__ )
return name.lower()
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : List[Any] = _single_underscore_re.split(snake_case__ )
A : Dict = [_multiple_underscores_re.split(n ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(snake_case__ ) if n != '''''' )
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
if os.path.basename(snake_case__ ) != name:
raise ValueError(F'Should be a dataset name, not a path: {name}' )
return camelcase_to_snakecase(snake_case__ )
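# --- illustrative sketch, not part of the original file ---
# What the two converters above do, re-derived with the same regexes:
# "BERTBase" -> "BERT_Base" (the acronym run is split) -> "bert_base"
# (lowered), and the camelcase builder reverses it. The `_demo_*` names are
# local to this sketch.
_demo_upper_upper = re.compile(R"([A-Z]+)([A-Z][a-z])")
_demo_lower_upper = re.compile(R"([a-z\d])([A-Z])")
_demo = _demo_upper_upper.sub(R"\1_\2", "BERTBase")
_demo = _demo_lower_upper.sub(R"\1_\2", _demo).lower()
assert _demo == "bert_base"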
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
if os.path.basename(snake_case__ ) != name:
raise ValueError(F'Should be a dataset name, not a path: {name}' )
if not re.match(_split_re , snake_case__ ):
raise ValueError(F'Split name should match \'{_split_re}\' but got \'{split}\'.' )
return F'{filename_prefix_for_name(snake_case__ )}-{split}'
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None ):
'''simple docstring'''
A : int = filename_prefix_for_split(snake_case__ , snake_case__ )
if filetype_suffix:
prefix += F'.{filetype_suffix}'
A : Dict = os.path.join(snake_case__ , snake_case__ )
return F'{filepath}*'
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None ):
'''simple docstring'''
A : str = filename_prefix_for_split(snake_case__ , snake_case__ )
A : Dict = os.path.join(snake_case__ , snake_case__ )
if shard_lengths:
A : Optional[int] = len(snake_case__ )
A : Union[str, Any] = [F'{prefix}-{shard_id:05d}-of-{num_shards:05d}' for shard_id in range(snake_case__ )]
if filetype_suffix:
A : Dict = [filename + F'.{filetype_suffix}' for filename in filenames]
return filenames
else:
A : List[str] = prefix
if filetype_suffix:
filename += F'.{filetype_suffix}'
return [filename]
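# --- illustrative usage sketch, not part of the original file ---
# The shard-naming pattern the helper above produces, assuming a dataset
# named "squad", split "train", 3 shards, and an "arrow" suffix:
#   squad-train-00000-of-00003.arrow ... squad-train-00002-of-00003.arrow
_demo_prefix, _demo_num_shards = "squad-train", 3
_demo_names = [F'{_demo_prefix}-{shard_id:05d}-of-{_demo_num_shards:05d}.arrow' for shard_id in range(_demo_num_shards)]
assert _demo_names[-1] == "squad-train-00002-of-00003.arrow"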
| 311
|
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , ) -> str:
"""simple docstring"""
A : Any = parent
A : List[Any] = batch_size
A : Union[str, Any] = seq_length
A : Any = is_training
A : int = use_input_mask
A : Union[str, Any] = vocab_size
A : List[Any] = hidden_size
A : List[Any] = num_hidden_layers
A : Optional[int] = num_attention_heads
A : str = intermediate_size
A : Tuple = hidden_act
A : Union[str, Any] = hidden_dropout_prob
A : Union[str, Any] = attention_probs_dropout_prob
A : int = max_position_embeddings
A : Optional[int] = initializer_range
A : Any = use_labels
A : Optional[int] = scope
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Optional[int] = None
if self.use_input_mask:
A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Dict = self.get_config()
return config, input_ids, input_mask, token_labels
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A, A, A, A : Any = self.prepare_config_and_inputs()
A : Tuple = True
A : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
A : List[str] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
A : int = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
"""simple docstring"""
A : List[str] = True
A : Union[str, Any] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , )
A : List[Any] = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = True
A : Tuple = True
A : Optional[int] = BertGenerationDecoder(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval()
# first forward pass
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , )
A : Optional[int] = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
A : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
A : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
A : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
A : Any = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
# select random slice
A : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
A : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
A : Optional[Any] = BertGenerationDecoder(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A, A, A, A : Optional[int] = self.prepare_config_and_inputs()
A : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class A ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
__magic_name__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__magic_name__ = (BertGenerationDecoder,) if is_torch_available() else ()
__magic_name__ = (
{'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : List[str] = BertGenerationEncoderTester(self )
A : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A, A, A, A : Tuple = self.model_tester.prepare_config_and_inputs()
A : str = '''bert'''
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A, A, A, A, A, A : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
A : Union[str, Any] = None
self.model_tester.create_and_check_model_as_decoder(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Optional[Any] = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Tuple = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
A : Optional[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
A : Dict = model(SCREAMING_SNAKE_CASE )[0]
A : Optional[Any] = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
A : Dict = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Optional[Any] = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
A : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
A : Optional[Any] = model(SCREAMING_SNAKE_CASE )[0]
A : Optional[Any] = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
A : Any = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 311
| 1
|
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
lowercase : List[Any] = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
return (abs(source - target ) / target) < 0.01
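# --- illustrative note, not part of the original file ---
# The helper above is a 1% relative-tolerance check on dataset sizes; it
# behaves much like math.isclose with rel_tol=0.01 (isclose is symmetric and
# accepts values exactly on the bound, so the equivalence is only approximate).
import math
assert (abs(1009 - 1000) / 1000) < 0.01
assert math.isclose(1009, 1000, rel_tol=0.01)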
@pytest.mark.integration
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : List[Any] = _TestCommandArgs(dataset=snake_case__ , all_configs=snake_case__ , save_infos=snake_case__ )
A : Optional[int] = TestCommand(*snake_case__ )
test_command.run()
A : List[str] = os.path.join(snake_case__ , '''README.md''' )
assert os.path.exists(snake_case__ )
A : Union[str, Any] = DatasetInfosDict.from_directory(snake_case__ )
A : Dict = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 235_1563,
'''num_examples''': 1_0000,
},
{
'''name''': '''validation''',
'''num_bytes''': 23_8418,
'''num_examples''': 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
A, A : str = getattr(dataset_infos['''default'''] , key ), getattr(expected_dataset_infos['''default'''] , key )
if key == "num_bytes":
assert is_apercent_close(snake_case__ , snake_case__ )
elif key == "splits":
assert list(snake_case__ ) == list(snake_case__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
assert result == expected
| 311
|
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
return 1.0 / (1.0 + np.exp(-_outputs ))
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Optional[int] = np.max(_outputs , axis=-1 , keepdims=snake_case__ )
A : Any = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=snake_case__ )
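# --- illustrative note, not part of the original file ---
# The max-subtraction above is the standard numerically stable softmax: it
# leaves the probabilities unchanged but avoids overflow on large logits.
_demo_logits = np.array([1000.0, 1001.0, 1002.0])  # naive np.exp would overflow to inf
_demo_maxes = np.max(_demo_logits, axis=-1, keepdims=True)
_demo_shifted = np.exp(_demo_logits - _demo_maxes)
_demo_probs = _demo_shifted / _demo_shifted.sum(axis=-1, keepdims=True)
assert np.isclose(_demo_probs.sum(), 1.0)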
class A ( __snake_case ):
__magic_name__ = '''sigmoid'''
__magic_name__ = '''softmax'''
__magic_name__ = '''none'''
@add_end_docstrings(
__snake_case , R'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class A ( __snake_case ):
__magic_name__ = False
__magic_name__ = ClassificationFunction.NONE
def __init__( self , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="" , **SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
A : Optional[Any] = tokenizer_kwargs
A : int = {}
if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None:
A : int = self.model.config.return_all_scores
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or top_k is None:
A : Union[str, Any] = top_k
A : Dict = False
elif return_all_scores is not None:
warnings.warn(
'''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'''
''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , SCREAMING_SNAKE_CASE , )
if return_all_scores:
A : Optional[int] = None
else:
A : Dict = 1
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : Dict = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
A : int = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : str = super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
A : Any = '''top_k''' not in kwargs
if isinstance(args[0] , SCREAMING_SNAKE_CASE ) and _legacy:
# This pipeline is odd, and returns a list when a single item is run
return [result]
else:
return result
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict[str, GenericTensor]:
"""simple docstring"""
A : List[Any] = self.framework
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return self.tokenizer(**SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) == 1 and isinstance(inputs[0] , SCREAMING_SNAKE_CASE ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' )
return self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
return self.model(**SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=True ) -> List[str]:
"""simple docstring"""
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
A : Optional[int] = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
A : Any = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None:
A : Optional[int] = self.model.config.function_to_apply
else:
A : Optional[int] = ClassificationFunction.NONE
A : Any = model_outputs['''logits'''][0]
A : List[Any] = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
A : int = sigmoid(SCREAMING_SNAKE_CASE )
elif function_to_apply == ClassificationFunction.SOFTMAX:
A : Any = softmax(SCREAMING_SNAKE_CASE )
elif function_to_apply == ClassificationFunction.NONE:
A : int = outputs
else:
raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
A : int = [
{'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(SCREAMING_SNAKE_CASE )
]
if not _legacy:
dict_scores.sort(key=lambda x: x["score"] , reverse=SCREAMING_SNAKE_CASE )
if top_k is not None:
A : Union[str, Any] = dict_scores[:top_k]
return dict_scores
| 311
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=__snake_case ):
__magic_name__ = ['''speech''']
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
requires_backends(self , ['''speech'''] )
class A ( metaclass=__snake_case ):
__magic_name__ = ['''speech''']
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ['''speech'''] )
| 311
|
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase_ ( snake_case__ = "laptop" ):
'''simple docstring'''
A : Tuple = F'https://www.amazon.in/laptop/s?k={product}'
A : Optional[int] = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
A : Any = BeautifulSoup(requests.get(snake_case__ , headers=snake_case__ ).text )
# Initialize a Pandas dataframe with the column titles
A : List[str] = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
A : Optional[Any] = item.ha.text
A : Union[str, Any] = '''https://www.amazon.in/''' + item.ha.a['''href''']
A : Tuple = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
A : int = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
A : Optional[int] = '''Not available'''
try:
A : str = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
A : List[Any] = ''''''
try:
A : Dict = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 100 )
except ValueError:
A : str = float('''nan''' )
except AttributeError:
pass
A : Union[str, Any] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A : List[str] = ''' '''
A : Optional[Any] = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
lowercase : Union[str, Any] = 'headphones'
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 311
| 1
|
'''simple docstring'''
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
if len(snake_case__ ) <= 1:
return [tuple(snake_case__ )]
A : Tuple = []
def generate(snake_case__ , snake_case__ ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , snake_case__ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
A, A : Optional[Any] = arr[k - 1], arr[i]
else: # k is odd
A, A : Optional[Any] = arr[k - 1], arr[0]
generate(k - 1 , snake_case__ )
generate(len(snake_case__ ) , snake_case__ )
return res
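# --- illustrative verification sketch, not part of the original file ---
# Heap's algorithm enumerates all n! permutations exactly once; a quick
# cross-check against itertools.permutations (orders differ, the sets agree).
# `heaps_reference` is a local re-statement of the algorithm for this check.
import itertools
def heaps_reference(arr):
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []
    def generate(k, a):
        if k == 1:
            res.append(tuple(a))
            return
        generate(k - 1, a)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap positions i and k-1
                a[k - 1], a[i] = a[i], a[k - 1]
            else:  # k is odd: swap positions 0 and k-1
                a[k - 1], a[0] = a[0], a[k - 1]
            generate(k - 1, a)
    generate(len(arr), arr)
    return res
_demo_perms = heaps_reference([1, 2, 3])
assert sorted(_demo_perms) == sorted(itertools.permutations([1, 2, 3]))
assert len(_demo_perms) == len(set(_demo_perms)) == 6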
if __name__ == "__main__":
lowercase : List[str] = input('Enter numbers separated by a comma:\n').strip()
lowercase : int = [int(item) for item in user_input.split(',')]
print(heaps(arr))
| 311
|
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : Optional[int] = x
A : str = y
for step in range(snake_case__ ): # noqa: B007
A : str = a * a - b * b + x
A : List[str] = 2 * a * b + y
A : str = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(snake_case__ , 1 , 1 ) )
def lowerCAmelCase_ ( snake_case__ = 800 , snake_case__ = 600 , snake_case__ = -0.6 , snake_case__ = 0 , snake_case__ = 3.2 , snake_case__ = 50 , snake_case__ = True , ):
'''simple docstring'''
A : List[Any] = Image.new('''RGB''' , (image_width, image_height) )
A : Tuple = img.load()
# loop through the image-coordinates
for image_x in range(snake_case__ ):
for image_y in range(snake_case__ ):
# determine the figure-coordinates based on the image-coordinates
A : Optional[int] = figure_width / image_width * image_height
A : Tuple = figure_center_x + (image_x / image_width - 0.5) * figure_width
A : List[str] = figure_center_y + (image_y / image_height - 0.5) * figure_height
A : str = get_distance(snake_case__ , snake_case__ , snake_case__ )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
A : str = get_color_coded_rgb(snake_case__ )
else:
A : List[Any] = get_black_and_white_rgb(snake_case__ )
return img
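# --- illustrative sketch, not part of the original file ---
# Escape-time intuition for the distance function above: (0, 0) never
# diverges, so the loop exhausts max_step iterations and the distance is 1.0
# (black in the black-and-white coloring), while (1, 1) escapes on the very
# first step and gets distance 0.0 (white). `escape_fraction` is a
# hypothetical stand-in name for this re-statement.
def escape_fraction(x, y, max_step=50):
    a, b = x, y
    for step in range(max_step):  # noqa: B007
        a, b = a * a - b * b + x, 2 * a * b + y
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
assert escape_fraction(0, 0) == 1.0
assert escape_fraction(1, 1) == 0.0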
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowercase : Optional[Any] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 311
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Union[str, Any] = logging.get_logger(__name__)
lowercase : int = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class A ( __snake_case ):
__magic_name__ = '''vivit'''
def __init__( self , SCREAMING_SNAKE_CASE=224 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=[2, 16, 16] , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu_fast" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-06 , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
A : Any = hidden_size
A : Union[str, Any] = num_hidden_layers
A : int = num_attention_heads
A : str = intermediate_size
A : Optional[int] = hidden_act
A : List[str] = hidden_dropout_prob
A : Union[str, Any] = attention_probs_dropout_prob
A : Tuple = initializer_range
A : Dict = layer_norm_eps
A : Any = image_size
A : Any = num_frames
A : Tuple = tubelet_size
A : int = num_channels
A : Optional[int] = qkv_bias
super().__init__(**SCREAMING_SNAKE_CASE )
| 311
|
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowercase : Optional[int] = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowercase : str = argparse.ArgumentParser()
parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
lowercase : Optional[Any] = parser.parse_args()
if args.check_lib:
lowercase : List[Any] = importlib.import_module('transformers')
lowercase : str = Path(transformers_module.__file__).parent
else:
lowercase : List[Any] = Path.cwd() / 'build/lib/transformers'
if not test_custom_files_are_present(transformers_path):
raise ValueError('The built release does not contain the custom files. Fix this before going further!')
| 311
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : int = logging.get_logger(__name__)
lowercase : Optional[int] = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class A ( __snake_case ):
__magic_name__ = '''swin2sr'''
__magic_name__ = {
'''hidden_size''': '''embed_dim''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=180 , SCREAMING_SNAKE_CASE=[6, 6, 6, 6, 6, 6] , SCREAMING_SNAKE_CASE=[6, 6, 6, 6, 6, 6] , SCREAMING_SNAKE_CASE=8 , SCREAMING_SNAKE_CASE=2.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE="1conv" , SCREAMING_SNAKE_CASE="pixelshuffle" , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
A : Tuple = image_size
A : Optional[int] = patch_size
A : Tuple = num_channels
A : List[Any] = embed_dim
A : Union[str, Any] = depths
A : int = len(SCREAMING_SNAKE_CASE )
A : Dict = num_heads
A : List[str] = window_size
A : int = mlp_ratio
A : List[str] = qkv_bias
A : List[str] = hidden_dropout_prob
A : int = attention_probs_dropout_prob
A : Optional[int] = drop_path_rate
A : int = hidden_act
A : List[Any] = use_absolute_embeddings
A : Optional[int] = layer_norm_eps
A : str = initializer_range
A : List[Any] = upscale
A : List[Any] = img_range
A : List[str] = resi_connection
A : int = upsampler
| 311
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=2 , ) -> List[str]:
"""simple docstring"""
A : List[str] = parent
A : Optional[Any] = batch_size
A : Tuple = image_size
A : int = patch_size
A : Optional[int] = num_channels
A : str = is_training
A : List[Any] = use_labels
A : Any = hidden_size
A : Any = num_hidden_layers
A : Optional[int] = num_attention_heads
A : Any = intermediate_size
A : List[str] = hidden_act
A : str = hidden_dropout_prob
A : Tuple = attention_probs_dropout_prob
A : Any = type_sequence_label_size
A : Optional[int] = initializer_range
A : Dict = scope
A : Tuple = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A : List[Any] = (image_size // patch_size) ** 2
A : Tuple = num_patches + 2
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Tuple = None
if self.use_labels:
A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
A : Any = TFDeiTModel(config=SCREAMING_SNAKE_CASE )
A : str = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
A : Tuple = TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE )
A : List[Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A : Optional[int] = 1
A : str = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE )
A : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Tuple = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
A : str = self.type_sequence_label_size
A : Optional[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE )
A : Optional[Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A : Optional[Any] = 1
A : List[str] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE )
A : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Optional[int] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : Optional[int] = self.prepare_config_and_inputs()
A, A, A : Tuple = config_and_inputs
A : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class A ( __snake_case , __snake_case , unittest.TestCase ):
__magic_name__ = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__magic_name__ = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Tuple = TFDeiTModelTester(self )
A : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
pass
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A, A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Any = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
A : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Dense ) )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A, A : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Any = model_class(SCREAMING_SNAKE_CASE )
A : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Union[str, Any] = [*signature.parameters.keys()]
A : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Tuple:
"""simple docstring"""
A : Union[str, Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : List[str] = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class A ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Union[str, Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
A : Dict = self.default_image_processor
A : List[str] = prepare_img()
A : Any = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
# forward pass
A : Optional[int] = model(**SCREAMING_SNAKE_CASE )
# verify the logits
A : List[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
A : str = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Tuple = logging.get_logger(__name__)
lowercase : int = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class A ( __snake_case ):
__magic_name__ = '''vit_msn'''
def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-06 , SCREAMING_SNAKE_CASE=224 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
A : List[str] = hidden_size
A : List[Any] = num_hidden_layers
A : Dict = num_attention_heads
A : str = intermediate_size
A : str = hidden_act
A : Union[str, Any] = hidden_dropout_prob
A : str = attention_probs_dropout_prob
A : List[str] = initializer_range
A : Dict = layer_norm_eps
A : Any = image_size
A : Optional[int] = patch_size
A : Optional[int] = num_channels
A : Union[str, Any] = qkv_bias
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_cpmant'] = [
        'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CpmAntForCausalLM',
        'CpmAntModel',
        'CpmAntPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
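# With this lazy-module pattern, `import transformers.models.cpmant` stays cheap:
# the torch-dependent modeling code is only imported on first attribute access,
# while the TYPE_CHECKING branch keeps the real symbols visible to type checkers.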
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowercase : Union[str, Any] = logging.get_logger(__name__)
lowercase : int = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
lowercase : Dict = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
    result = {}
    with open(snake_case__ , '''r''' ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                # keep the first token on the line; the line number becomes the integer id
                result[line_number] = words[0]
    return result
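# A minimal sketch of the expected input (hypothetical labels file): the first
# token on each line is kept and the line number becomes the integer id, e.g.
#
#     neutral
#     happy extra_columns_are_ignored
#     sad
#
# is read back as {0: 'neutral', 1: 'happy', 2: 'sad'} -- an id2label mapping.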
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
for attribute in key.split('''.''' ):
A : List[str] = getattr(snake_case__ , snake_case__ )
A : str = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(snake_case__ ):
A : str = PARAM_MAPPING[full_name.split('''.''' )[-1]]
A : int = '''param'''
if weight_type is not None and weight_type != "param":
A : Dict = getattr(snake_case__ , snake_case__ ).shape
elif weight_type is not None and weight_type == "param":
A : Tuple = hf_pointer
for attribute in hf_param_name.split('''.''' ):
A : Optional[Any] = getattr(snake_case__ , snake_case__ )
A : str = shape_pointer.shape
# let's reduce dimension
A : int = value[0]
else:
A : Tuple = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
A : Optional[int] = value
elif weight_type == "weight_g":
A : str = value
elif weight_type == "weight_v":
A : Any = value
elif weight_type == "bias":
A : Union[str, Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
A : Optional[Any] = getattr(snake_case__ , snake_case__ )
A : Optional[int] = value
else:
A : Union[str, Any] = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : str = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(snake_case__ ):
A : List[str] = PARAM_MAPPING[full_name.split('''.''' )[-1]]
A : Any = '''param'''
if weight_type is not None and weight_type != "param":
A : List[str] = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
A : List[Any] = '''.'''.join([key, hf_param_name] )
else:
A : Optional[int] = key
A : Dict = value if '''lm_head''' in full_key else value[0]
lowercase : Tuple = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None ):
'''simple docstring'''
A : Dict = False
for key, mapped_key in MAPPING.items():
A : str = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
A : Any = True
if "*" in mapped_key:
A : Optional[int] = name.split(snake_case__ )[0].split('''.''' )[-2]
A : List[Any] = mapped_key.replace('''*''' , snake_case__ )
if "weight_g" in name:
A : List[Any] = '''weight_g'''
elif "weight_v" in name:
A : str = '''weight_v'''
elif "bias" in name:
A : List[str] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A : str = '''weight'''
else:
A : Tuple = None
if hf_dict is not None:
rename_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
    return is_used
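# Illustrative walk-through (names assume the MAPPING table above): a fairseq
# tensor called 'encoder.layers.3.self_attn.k_proj.weight' matches the
# 'self_attn.k_proj' entry, the '*' wildcard is filled with the layer index '3',
# and the value is routed to 'wav2vec2.encoder.layers.3.attention.k_proj'
# with weight_type 'weight'.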
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : List[str] = []
A : str = fairseq_model.state_dict()
A : Optional[Any] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
A : List[str] = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == '''group''' , )
A : Optional[Any] = True
else:
A : int = load_wavaveca_layer(snake_case__ , snake_case__ , snake_case__ )
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(F'Unused weights: {unused_weights}' )
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : List[str] = full_name.split('''conv_layers.''' )[-1]
A : Optional[int] = name.split('''.''' )
A : Dict = int(items[0] )
A : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
A : Dict = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
A : Union[str, Any] = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
A : Any = value
                logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
A : Dict = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(snake_case__ )
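# Naming convention handled above: 'conv_layers.0.0.weight' parses to
# layer_id=0, type_id=0 (the convolution itself), while 'conv_layers.0.2.weight'
# (type_id=2) targets that layer's norm parameters instead.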
@torch.no_grad()
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=True , snake_case__=False ):
'''simple docstring'''
if config_path is not None:
A : Union[str, Any] = WavaVecaConfig.from_pretrained(snake_case__ )
else:
A : Optional[int] = WavaVecaConfig()
if is_seq_class:
A : Tuple = read_txt_into_dict(snake_case__ )
A : Dict = idalabel
A : Union[str, Any] = WavaVecaForSequenceClassification(snake_case__ )
A : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
feature_extractor.save_pretrained(snake_case__ )
elif is_finetuned:
if dict_path:
A : Union[str, Any] = Dictionary.load(snake_case__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
A : Any = target_dict.pad_index
A : int = target_dict.bos_index
A : Dict = target_dict.eos_index
A : Optional[Any] = len(target_dict.symbols )
A : int = os.path.join(snake_case__ , '''vocab.json''' )
if not os.path.isdir(snake_case__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(snake_case__ ) )
return
os.makedirs(snake_case__ , exist_ok=snake_case__ )
A : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
A : str = 0
A : str = 1
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(snake_case__ , snake_case__ )
A : Optional[Any] = WavaVecaCTCTokenizer(
snake_case__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=snake_case__ , )
A : int = True if config.feat_extract_norm == '''layer''' else False
A : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
A : Optional[Any] = WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ )
processor.save_pretrained(snake_case__ )
A : Tuple = WavaVecaForCTC(snake_case__ )
else:
A : List[str] = WavaVecaForPreTraining(snake_case__ )
if is_finetuned or is_seq_class:
A, A, A : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
A : Dict = argparse.Namespace(task='''audio_pretraining''' )
A : Tuple = fairseq.tasks.setup_task(snake_case__ )
A, A, A : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=snake_case__ )
A : str = model[0].eval()
recursively_load_weights(snake_case__ , snake_case__ , not is_finetuned )
hf_wavavec.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
lowercase : Tuple = parser.parse_args()
lowercase : Optional[Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[Any]:
"""simple docstring"""
A : int = pos_x
A : Optional[Any] = pos_y
A : Optional[Any] = (pos_y, pos_x)
A : str = goal_x
A : Optional[int] = goal_y
A : List[Any] = g_cost
A : str = parent
A : str = self.calculate_heuristic()
def __lowerCAmelCase ( self ) -> float:
"""simple docstring"""
A : Optional[int] = abs(self.pos_x - self.goal_x )
A : Optional[Any] = abs(self.pos_y - self.goal_y )
return dx + dy
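        # Manhattan-distance example: a node at (pos_x=1, pos_y=2) with the goal
        # at (goal_x=4, goal_y=6) gets a heuristic of |1 - 4| + |2 - 6| = 7.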
def __lt__( self , SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
return self.f_cost < other.f_cost
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE )
A : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , SCREAMING_SNAKE_CASE )
A : Optional[Any] = [self.start]
A : list[Node] = []
A : Tuple = False
def __lowerCAmelCase ( self ) -> Path | None:
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
A : Optional[int] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
A : Optional[int] = True
return self.retrace_path(SCREAMING_SNAKE_CASE )
self.closed_nodes.append(SCREAMING_SNAKE_CASE )
A : Any = self.get_successors(SCREAMING_SNAKE_CASE )
for child_node in successors:
if child_node in self.closed_nodes:
continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
if not self.reached:
return [self.start.pos]
return None
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[Node]:
"""simple docstring"""
A : List[Any] = []
for action in delta:
A : List[str] = parent.pos_x + action[1]
A : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , SCREAMING_SNAKE_CASE , ) )
return successors
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Path:
"""simple docstring"""
A : int = node
A : Union[str, Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
A : int = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print('------')
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase : Union[str, Any] = get_tests_dir('fixtures/dummy-config.json')
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : int = 0
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Union[str, Any] = AutoConfig.from_pretrained('''bert-base-uncased''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : List[Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : str = AutoConfig.for_model('''roberta''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
A : List[Any] = os.path.join(SCREAMING_SNAKE_CASE , '''fake-roberta''' )
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
with open(os.path.join(SCREAMING_SNAKE_CASE , '''config.json''' ) , '''w''' ) as f:
f.write(json.dumps({} ) )
A : Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertEqual(type(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE )
# Wrong model type will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE ):
AutoConfig.register('''model''' , SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE ):
AutoConfig.register('''bert''' , SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
A : Any = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE )
A : Tuple = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ):
A : Any = AutoConfig.from_pretrained('''bert-base''' )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ):
A : Dict = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE ):
A : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(SCREAMING_SNAKE_CASE ):
A : List[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=SCREAMING_SNAKE_CASE )
A : Dict = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE )
A : Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
class A ( __snake_case ):
__magic_name__ = '''new-model'''
try:
AutoConfig.register('''new-model''' , SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
A : str = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote code is disabled, we load the local one.
A : Any = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote is enabled, we load from the Hub
A : List[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowercase : Any = 'src/transformers'
lowercase : str = 'docs/source/en/tasks'
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
with open(snake_case__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
A : Union[str, Any] = f.readlines()
# Find the start prompt.
A : List[Any] = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
A : List[str] = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
lowercase : int = direct_transformers_import(TRANSFORMERS_PATH)
lowercase : str = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
lowercase : Optional[int] = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : int = TASK_GUIDE_TO_MODELS[task_guide]
A : List[str] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() )
A : Union[str, Any] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n"
def lowerCAmelCase_ ( snake_case__ , snake_case__=False ):
'''simple docstring'''
A, A, A, A : Optional[int] = _find_text_in_file(
filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , )
A : Optional[int] = get_model_list_for_task(snake_case__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(snake_case__ , snake_case__ ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
''' to fix this.''' )
if __name__ == "__main__":
lowercase : Dict = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowercase : List[Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 311
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
__magic_name__ = AltDiffusionPipeline
__magic_name__ = TEXT_TO_IMAGE_PARAMS
__magic_name__ = TEXT_TO_IMAGE_BATCH_PARAMS
__magic_name__ = TEXT_TO_IMAGE_IMAGE_PARAMS
__magic_name__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
A : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
A : Dict = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
A : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
A : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
A : Tuple = CLIPTextModel(SCREAMING_SNAKE_CASE )
A : Dict = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
A : str = 77
A : int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0 ) -> Dict:
"""simple docstring"""
if str(SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
A : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE )
else:
A : List[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
A : Optional[int] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A : Dict = self.get_dummy_components()
torch.manual_seed(0 )
A : Optional[int] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
A : Union[str, Any] = RobertaSeriesModelWithTransformation(SCREAMING_SNAKE_CASE )
A : int = text_encoder
A : Tuple = AltDiffusionPipeline(**SCREAMING_SNAKE_CASE )
A : int = alt_pipe.to(SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
A : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
A : List[Any] = '''A photo of an astronaut'''
A : Union[str, Any] = alt_pipe(**SCREAMING_SNAKE_CASE )
A : Optional[int] = output.images
A : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A : Any = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A : str = self.get_dummy_components()
A : Dict = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE )
torch.manual_seed(0 )
A : Union[str, Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
A : Union[str, Any] = RobertaSeriesModelWithTransformation(SCREAMING_SNAKE_CASE )
A : Optional[int] = text_encoder
A : int = AltDiffusionPipeline(**SCREAMING_SNAKE_CASE )
A : Union[str, Any] = alt_pipe.to(SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
A : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
A : Dict = alt_pipe(**SCREAMING_SNAKE_CASE )
A : Tuple = output.images
A : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A : Optional[Any] = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Optional[int] = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=SCREAMING_SNAKE_CASE )
A : str = alt_pipe.to(SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
A : Any = '''A painting of a squirrel eating a burger'''
A : Union[str, Any] = torch.manual_seed(0 )
A : str = alt_pipe([prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
A : Optional[int] = output.images
A : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A : Any = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : Dict = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
A : List[str] = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE )
A : int = alt_pipe.to(SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
A : Union[str, Any] = '''A painting of a squirrel eating a burger'''
A : Optional[Any] = torch.manual_seed(0 )
A : int = alt_pipe([prompt] , generator=SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''numpy''' )
A : Any = output.images
A : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A : Optional[int] = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
'''simple docstring'''
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
if len(snake_case__ ) <= 1:
return [tuple(snake_case__ )]
A : Tuple = []
def generate(snake_case__ , snake_case__ ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , snake_case__ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
A, A : Optional[Any] = arr[k - 1], arr[i]
else: # k is odd
A, A : Optional[Any] = arr[k - 1], arr[0]
generate(k - 1 , snake_case__ )
generate(len(snake_case__ ) , snake_case__ )
return res
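# Quick sanity check: heaps([1, 2, 3]) produces all 3! = 6 orderings, in
# Heap's-algorithm order:
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]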
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
print(heaps(arr))
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
lowercase : str = logging.getLogger(__name__)
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : Any = argparse.ArgumentParser(
description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
parser.add_argument('''--file_path''' , type=snake_case__ , default='''data/dump.txt''' , help='''The path to the data.''' )
parser.add_argument('''--tokenizer_type''' , type=snake_case__ , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
parser.add_argument('''--tokenizer_name''' , type=snake_case__ , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
parser.add_argument('''--dump_file''' , type=snake_case__ , default='''data/dump''' , help='''The dump file prefix.''' )
A : List[str] = parser.parse_args()
logger.info(F'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
A : Optional[int] = BertTokenizer.from_pretrained(args.tokenizer_name )
A : Optional[int] = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
A : List[str] = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
elif args.tokenizer_type == "roberta":
A : Optional[int] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
A : Optional[Any] = tokenizer.special_tokens_map['''cls_token'''] # `<s>`
A : Any = tokenizer.special_tokens_map['''sep_token'''] # `</s>`
elif args.tokenizer_type == "gpt2":
A : Any = GPTaTokenizer.from_pretrained(args.tokenizer_name )
A : str = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
A : Optional[int] = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
logger.info(F'Loading text from {args.file_path}' )
with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
A : Optional[int] = fp.readlines()
logger.info('''Start encoding''' )
logger.info(F'{len(snake_case__ )} examples to process.' )
A : List[str] = []
A : str = 0
A : Optional[int] = 1_0000
A : Any = time.time()
for text in data:
A : int = F'{bos} {text.strip()} {sep}'
A : Dict = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
rslt.append(snake_case__ )
iter += 1
if iter % interval == 0:
A : int = time.time()
logger.info(F'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
A : int = time.time()
logger.info('''Finished binarization''' )
logger.info(F'{len(snake_case__ )} examples processed.' )
A : str = F'{args.dump_file}.{args.tokenizer_name}.pickle'
A : List[Any] = tokenizer.vocab_size
if vocab_size < (1 << 16):
A : Optional[Any] = [np.uintaa(snake_case__ ) for d in rslt]
else:
A : Dict = [np.intaa(snake_case__ ) for d in rslt]
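    # Why the split above: token ids fit in 16 bits only when the vocabulary has
    # fewer than 2**16 = 65536 entries (e.g. BERT's 30522-token vocab), which
    # roughly halves the size of the pickled dump versus 32-bit ids.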
random.shuffle(rslt_ )
logger.info(F'Dump to {dp_file}' )
with open(snake_case__ , '''wb''' ) as handle:
pickle.dump(rslt_ , snake_case__ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A ( __snake_case ):
__magic_name__ = (UniPCMultistepScheduler,)
__magic_name__ = (('''num_inference_steps''', 25),)
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : str = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
A : List[Any] = dict(self.forward_default_kwargs )
A : Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
A : Optional[Any] = self.dummy_sample
A : int = 0.1 * sample
A : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
A : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
A : List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
A : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
A, A : Tuple = sample, sample
for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
A : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
A : Optional[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : Optional[Any] = dict(self.forward_default_kwargs )
A : Tuple = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
A : List[Any] = self.dummy_sample
A : int = 0.1 * sample
A : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A : Optional[int] = self.get_scheduler_config()
A : Any = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
A : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
A : int = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
A : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
A : List[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
if scheduler is None:
A : Dict = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
A : Tuple = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE )
A : int = 10
A : Tuple = self.dummy_model()
A : Any = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
A : int = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
return sample
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Tuple = dict(self.forward_default_kwargs )
A : List[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
A : Dict = self.get_scheduler_config()
A : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
A : Optional[Any] = self.dummy_sample
A : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
A : Tuple = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
A : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
A : List[Any] = scheduler.timesteps[5]
A : Dict = scheduler.timesteps[6]
A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )
A : List[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
A : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
A : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
A : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config )
A : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
A : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
A : Optional[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
A : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , )
A : Dict = self.full_loop(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , )
assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : int = self.full_loop()
A : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : List[Any] = self.full_loop(prediction_type='''v_prediction''' )
A : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.1_014 ) < 1e-3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Dict = self.scheduler_classes[0]
A : List[Any] = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE )
A : Tuple = 10
A : Union[str, Any] = self.dummy_model()
A : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
A : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
assert sample.dtype == torch.floataa
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
for scheduler_class in self.scheduler_classes:
A : Dict = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowercase : int = logging.get_logger(__name__)
# TODO: upload to AWS
lowercase : List[Any] = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
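

# Illustrative usage sketch (not part of the original module; field names
# follow the __init__ above, values are arbitrary):
#
#   config = RetriBertConfig(projection_dim=256)
#   assert config.model_type == "retribert" and config.share_encoders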
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin the denoising loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
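

# Illustrative usage sketch (not part of the original file). It mirrors the
# canonical diffusers DDIMPipeline API; the model id is an example choice.
#
#   from diffusers import DDIMPipeline
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#   image.save("ddim_sample.png")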
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of ``input_list`` as a sorted list."""
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, count in enumerate(counts) if count == max_count})
if __name__ == "__main__":
import doctest
doctest.testmod()
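    # Illustrative sanity checks (added, not in the original file):
    print(mode([2, 3, 4, 5, 3, 4, 2]))  # -> [2, 3, 4]
    print(mode([]))  # -> []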
from __future__ import annotations
from random import random
class Node:
    """A treap node holding a value and a random heap priority."""

    def __init__(self, value: int | None = None) -> None:
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into nodes with values <= value and nodes with values > value."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps, assuming all of left's values are <= right's values."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert value into the treap."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Erase all nodes with the given value."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print an in-order traversal of the treap."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply commands like '+5' (insert 5) or '-3' (erase all 3s) to the treap."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """Interactive treap session: read commands until 'q'."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
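

# Illustrative non-interactive usage sketch (added, not in the original file):
#
#   root = None
#   for v in (5, 3, 8, 3):
#       root = insert(root, v)
#   inorder(root)        # prints: 3,3,5,8,
#   root = erase(root, 3)
#   inorder(root)        # prints: 5,8,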
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
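

# Illustrative usage sketch (added, not in the original file): tracking
# learning rates with the helper above. Optimizer and counts are arbitrary.
#
#   model = nn.Linear(4, 4)
#   optimizer = AdamW(model.parameters(), lr=10.0)
#   scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
#   print(unwrap_schedule(scheduler, num_steps=10))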
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """Wrap LR lambdas so the scheduler state dict stays picklable."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs


class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
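

# Illustrative usage sketch (added, not in the original file): rescaling
# predicted boxes back to the raw image size, then clipping to its bounds.
# Names follow the deobfuscated helpers above.
#
#   boxes = _scale_box(boxes, scales_yx)   # (N, 4) boxes, (1, 2) y/x scales
#   _clip_box(boxes, (raw_h, raw_w))       # in-place clamp to image bounds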
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
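

# Illustrative migration sketch (added, not in the original file): prefer the
# replacement class named in the warning above.
#
#   from transformers import ChineseCLIPImageProcessor
#   image_processor = ChineseCLIPImageProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")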
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
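
# Illustrative invocation (added, not in the original file); the script name
# and paths are assumptions for the sake of the example:
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --dump_path ./stable-diffusion-v1-5 \
#       --extract_ema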
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with a Maclaurin series of ``accuracy`` terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with a Maclaurin series of ``accuracy`` terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
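

# Illustrative usage sketch (added, not in the original file): this builder
# backs the packaged "csv" loader, so typical use goes through load_dataset.
#
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=",")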
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True)

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs) -> None:
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
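

# Illustrative sanity check (added, not in the original file): with the
# default strides above, raw audio is downsampled by 5*2*2*2*2*2*2 = 320.
#
#   config = SEWConfig()
#   assert config.inputs_to_logits_ratio == 320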
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(data_dir, save_dir: str, model_name: str, bs: int = 8, max_source_length: int = 1024, type_path="val", n_obs=None, fp16=False, task="summarization", local_rank=None, num_return_sequences=1, dataset_kwargs: Dict = None, prefix="", **generate_kwargs):
    """Run evaluation on part of the data for one gpu; save to save_dir/rank_{rank}_output.json."""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        data_dir, tokenizer, max_source_length, max_target_length=1024, type_path=type_path, n_obs=n_obs, prefix=prefix, **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device), attention_mask=batch["attention_mask"].to(model.device), num_return_sequences=num_return_sequences, num_beams=num_beams, **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.", default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout", type=int, default=600, required=False, help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir, json_save_dir, args.model_name, type_path=args.type_path, bs=args.bs, fp16=args.fp16, task=args.task, local_rank=args.local_rank, n_obs=args.n_obs, max_source_length=args.max_source_length, num_return_sequences=args.num_return_sequences, prefix=args.prefix, dataset_kwargs=dataset_kwargs, **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results):
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout):
    """Wait up to `timeout` seconds for every rank to write its rank_*.json file, then load them all."""
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
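# A possible launch command (a sketch: the script filename, model name, and data paths below are
# illustrative placeholders, not taken from the source):
# python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
#     --model_name sshleifer/distilbart-cnn-12-6 --data_dir xsum --save_dir tmp_gen --bs 16 --fp16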
"""Convert Swin Transformer checkpoints from the timm library to the Hugging Face format."""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    """Derive a SwinConfig from the timm model name."""
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    """Map a timm parameter name to its Hugging Face equivalent."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
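# Worked example of the mapping above (illustrative, traced through rename_key by hand):
#   "layers.0.blocks.0.attn.proj.weight"
#     -> "swin.encoder.layers.0.blocks.0.attention.output.dense.weight"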
def convert_state_dict(orig_state_dict, model):
    """Rewrite a timm state dict so it can be loaded into SwinForImageClassification."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            # timm stores query/key/value as one fused matrix; split it into the three HF projections.
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    """Copy the timm model's weights into our Swin structure, verify the outputs match, and save."""
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    # Sanity check: the converted model must reproduce the timm logits.
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swin_name',
default='swin_tiny_patch4_window7_224',
type=str,
help='Name of the Swin timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowercase : int = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
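# Example invocation (a sketch: the script filename and output path are illustrative placeholders):
# python convert_swin_timm_to_pytorch.py \
#     --swin_name swin_tiny_patch4_window7_224 \
#     --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224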