code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ):
"""simple docstring"""
lowercase__ = ["image_processor", "tokenizer"]
lowercase__ = "ViTImageProcessor"
lowercase__ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : str ,lowercase_ : Any=None ,lowercase_ : Optional[Any]=None ,**lowercase_ : int ):
lowerCAmelCase__ : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,lowercase_ ,)
lowerCAmelCase__ : Union[str, Any] = kwargs.pop('''feature_extractor''' )
lowerCAmelCase__ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase_ ,lowercase_ )
def __call__( self : Optional[Any] ,lowercase_ : str=None ,lowercase_ : Optional[int]=None ,lowercase_ : Dict=None ,lowercase_ : List[Any]=None ,**lowercase_ : int ):
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
lowerCAmelCase__ : Optional[Any] = self.tokenizer(lowercase_ ,return_tensors=lowercase_ ,**lowercase_ )
if visual_prompt is not None:
lowerCAmelCase__ : Union[str, Any] = self.image_processor(lowercase_ ,return_tensors=lowercase_ ,**lowercase_ )
if images is not None:
lowerCAmelCase__ : Optional[int] = self.image_processor(lowercase_ ,return_tensors=lowercase_ ,**lowercase_ )
if visual_prompt is not None and images is not None:
lowerCAmelCase__ : Optional[Any] = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
lowerCAmelCase__ : int = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
lowerCAmelCase__ : int = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowercase_ ) ,tensor_type=lowercase_ )
def __lowerCAmelCase ( self : Tuple ,*lowercase_ : str ,**lowercase_ : List[Any] ):
return self.tokenizer.batch_decode(*lowercase_ ,**lowercase_ )
def __lowerCAmelCase ( self : Optional[int] ,*lowercase_ : Union[str, Any] ,**lowercase_ : List[str] ):
return self.tokenizer.decode(*lowercase_ ,**lowercase_ )
@property
def __lowerCAmelCase ( self : Optional[int] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,lowercase_ ,)
return self.image_processor_class
@property
def __lowerCAmelCase ( self : Tuple ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' ,lowercase_ ,)
return self.image_processor
| 106 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCAmelCase_ : Any = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *lowercase_ : List[str] , **lowercase_ : List[str]):
'''simple docstring'''
warnings.warn(
'''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use SegformerImageProcessor instead.''' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_)
| 91 | 0 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
snake_case : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
snake_case : Union[str, Any] = '''main'''
# Default branch name
snake_case : Optional[Any] = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'''
# One particular commit (not the top of `main`)
snake_case : Tuple = '''aaaaaaa'''
# This commit does not exist, so we should 404.
snake_case : Dict = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684'''
# Sha-1 of config.json on the top of `main`, for checking purposes
snake_case : Dict = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'''
@contextlib.contextmanager
def __lowercase ( ):
print('Welcome!' )
yield
print('Bye!' )
@contextlib.contextmanager
def __lowercase ( ):
print('Bonjour!' )
yield
print('Au revoir!' )
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :int ) -> Optional[Any]:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class snake_case_ (unittest.TestCase ):
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
def lowerCamelCase__( self :List[Any] ,__snake_case :str ) -> Tuple:
with ContextManagers([] ):
print('Transformers are awesome!' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() ,'Transformers are awesome!\n' )
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
def lowerCamelCase__( self :List[Any] ,__snake_case :Union[str, Any] ) -> str:
with ContextManagers([context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() ,'Welcome!\nTransformers are awesome!\nBye!\n' )
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
def lowerCamelCase__( self :Dict ,__snake_case :Optional[int] ) -> Optional[int]:
with ContextManagers([context_fr(), context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() ,'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
@require_torch
def lowerCamelCase__( self :List[str] ) -> Any:
self.assertEqual(find_labels(__snake_case ) ,['labels'] )
self.assertEqual(find_labels(__snake_case ) ,['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(__snake_case ) ,['start_positions', 'end_positions'] )
class snake_case_ (lowerCamelCase_ ):
pass
self.assertEqual(find_labels(__snake_case ) ,['labels'] )
@require_tf
def lowerCamelCase__( self :Union[str, Any] ) -> int:
self.assertEqual(find_labels(__snake_case ) ,['labels'] )
self.assertEqual(find_labels(__snake_case ) ,['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(__snake_case ) ,['start_positions', 'end_positions'] )
class snake_case_ (lowerCamelCase_ ):
pass
self.assertEqual(find_labels(__snake_case ) ,['labels'] )
@require_flax
def lowerCamelCase__( self :Any ) -> str:
# Flax models don't have labels
self.assertEqual(find_labels(__snake_case ) ,[] )
self.assertEqual(find_labels(__snake_case ) ,[] )
self.assertEqual(find_labels(__snake_case ) ,[] )
class snake_case_ (lowerCamelCase_ ):
pass
self.assertEqual(find_labels(__snake_case ) ,[] )
| 109 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 109 | 1 |
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 318 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = LayoutLMTokenizer
lowerCamelCase : Union[str, Any] = LayoutLMTokenizerFast
lowerCamelCase : Optional[int] = True
lowerCamelCase : int = True
def UpperCAmelCase__ (self ):
super().setUp()
lowerCamelCase_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCAmelCase__ (self , **A ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Any = '''UNwant\u00E9d,running'''
lowerCamelCase_ : List[Any] = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [7, 4, 5, 1_0, 8, 9] )
def UpperCAmelCase__ (self ):
pass
| 318 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __magic_name__ (__lowercase ):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
class __magic_name__ (__lowercase , __lowercase ):
lowerCamelCase__ = 1
@register_to_config
def __init__( self , _a = 2000 , _a = 0.1_5 , _a = 0.0_1 , _a = 1_3_4_8.0 , _a = 1E-5 , _a = 1 , ) -> Tuple:
# standard deviation of the initial noise distribution
lowerCAmelCase_ = sigma_max
# setable values
lowerCAmelCase_ = None
self.set_sigmas(_a , _a , _a , _a )
def __a ( self , _a , _a = None ) -> torch.FloatTensor:
return sample
def __a ( self , _a , _a = None , _a = None ) -> Any:
lowerCAmelCase_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowerCAmelCase_ = torch.linspace(1 , _a , _a , device=_a )
def __a ( self , _a , _a = None , _a = None , _a = None ) -> Optional[int]:
lowerCAmelCase_ = sigma_min if sigma_min is not None else self.config.sigma_min
lowerCAmelCase_ = sigma_max if sigma_max is not None else self.config.sigma_max
lowerCAmelCase_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(_a , _a )
lowerCAmelCase_ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowerCAmelCase_ = torch.exp(torch.linspace(math.log(_a ) , math.log(_a ) , _a ) )
lowerCAmelCase_ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def __a ( self , _a , _a ) -> int:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def __a ( self , _a , _a , _a , _a = None , _a = True , ) -> Union[SdeVeOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
lowerCAmelCase_ = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowerCAmelCase_ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowerCAmelCase_ = timesteps.to(self.discrete_sigmas.device )
lowerCAmelCase_ = self.discrete_sigmas[timesteps].to(sample.device )
lowerCAmelCase_ = self.get_adjacent_sigma(_a , _a ).to(sample.device )
lowerCAmelCase_ = torch.zeros_like(_a )
lowerCAmelCase_ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowerCAmelCase_ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowerCAmelCase_ = diffusion.unsqueeze(-1 )
lowerCAmelCase_ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
lowerCAmelCase_ = randn_tensor(
sample.shape , layout=sample.layout , generator=_a , device=sample.device , dtype=sample.dtype )
lowerCAmelCase_ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowerCAmelCase_ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=_a , prev_sample_mean=_a )
def __a ( self , _a , _a , _a = None , _a = True , ) -> Union[SchedulerOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowerCAmelCase_ = randn_tensor(sample.shape , layout=sample.layout , generator=_a ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowerCAmelCase_ = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
lowerCAmelCase_ = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
lowerCAmelCase_ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowerCAmelCase_ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowerCAmelCase_ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowerCAmelCase_ = step_size.unsqueeze(-1 )
lowerCAmelCase_ = sample + step_size * model_output
lowerCAmelCase_ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __a ( self , _a , _a , _a , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowerCAmelCase_ = timesteps.to(original_samples.device )
lowerCAmelCase_ = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowerCAmelCase_ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(_a ) * sigmas[:, None, None, None]
)
lowerCAmelCase_ = noise + original_samples
return noisy_samples
def __len__( self ) -> Union[str, Any]:
return self.config.num_train_timesteps
| 22 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = MobileBertTokenizer
lowerCamelCase__ = MobileBertTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = filter_non_english
lowerCamelCase__ = '''google/mobilebert-uncased'''
def __a ( self ) -> Optional[Any]:
super().setUp()
lowerCAmelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
lowerCAmelCase_ = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def __a ( self , _a ) -> Any:
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = "unwanted, running"
return input_text, output_text
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(_a , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] )
def __a ( self ) -> Tuple:
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# With lower casing
lowerCAmelCase_ = self.get_tokenizer(do_lower_case=_a )
lowerCAmelCase_ = self.get_rust_tokenizer(do_lower_case=_a )
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __a ( self ) -> Any:
lowerCAmelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __a ( self ) -> Dict:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Any:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __a ( self ) -> Any:
lowerCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowerCAmelCase_ = {}
for i, token in enumerate(_a ):
lowerCAmelCase_ = i
lowerCAmelCase_ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __a ( self ) -> Optional[int]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __a ( self ) -> List[str]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __a ( self ) -> Dict:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __a ( self ) -> Any:
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __a ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCAmelCase_ = tokenizer_r.encode_plus(
_a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , )
lowerCAmelCase_ = tokenizer_r.do_lower_case if hasattr(_a , "do_lower_case" ) else False
lowerCAmelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = ["的", "人", "有"]
lowerCAmelCase_ = "".join(_a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = True
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = False
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(_a )
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
| 22 | 1 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowerCamelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Dict , __a : CLIPSegForImageSegmentation , __a : CLIPSegProcessor , __a : AutoencoderKL , __a : CLIPTextModel , __a : CLIPTokenizer , __a : UNetaDConditionModel , __a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __a : StableDiffusionSafetyChecker , __a : CLIPImageProcessor , ) -> List[str]:
"""simple docstring"""
super().__init__()
if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
__lowercase : int = (
F"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
F" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"""to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
""" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
""" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
""" file"""
)
deprecate("""steps_offset!=1""" , """1.0.0""" , __a , standard_warn=__a )
__lowercase : Optional[Any] = dict(scheduler.config )
__lowercase : int = 1
__lowercase : Union[str, Any] = FrozenDict(__a )
if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
__lowercase : Dict = (
F"The configuration file of this scheduler: {scheduler} has not set the configuration"
""" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
""" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
""" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
""" Hub, it would be very nice if you could open a Pull request for the"""
""" `scheduler/scheduler_config.json` file"""
)
deprecate("""skip_prk_steps not set""" , """1.0.0""" , __a , standard_warn=__a )
__lowercase : str = dict(scheduler.config )
__lowercase : Tuple = True
__lowercase : Optional[Any] = FrozenDict(__a )
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
segmentation_model=__a , segmentation_processor=__a , vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , )
def lowerCAmelCase ( self : Optional[int] , __a : Optional[Union[str, int]] = "auto" ) -> List[Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowercase : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self.enable_attention_slicing(__a )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
__lowercase : Optional[int] = torch.device("""cuda""" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(__a , __a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__a , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Union[str, Any] , __a : Union[str, List[str]] , __a : Union[torch.FloatTensor, PIL.Image.Image] , __a : str , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : str , ) -> List[str]:
"""simple docstring"""
__lowercase : Optional[Any] = self.segmentation_processor(
text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
__lowercase : List[Any] = self.segmentation_model(**__a )
__lowercase : Tuple = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__lowercase : Any = self.numpy_to_pil(__a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__lowercase : Any = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=__a , image=__a , mask_image=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , ) | 233 |
lowerCamelCase : Tuple = {'''a''': ['''c''', '''b'''], '''b''': ['''d''', '''e'''], '''c''': [], '''d''': [], '''e''': []}
lowerCamelCase : int = ['''a''', '''b''', '''c''', '''d''', '''e''']
def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] ):
__lowercase : Dict = start
# add current to visited
visited.append(lowerCAmelCase_ )
__lowercase : Dict = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__lowercase : List[Any] = topological_sort(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# if all neighbors visited add current to sort
sort.append(lowerCAmelCase_ )
# if all vertices haven't been visited select a new one to visit
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
for vertice in vertices:
if vertice not in visited:
__lowercase : Tuple = topological_sort(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# return sort
return sort
if __name__ == "__main__":
lowerCamelCase : Any = topological_sort('''a''', [], [])
print(sort) | 233 | 1 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowerCAmelCase_ = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
if got_ver is None or want_ver is None:
raise ValueError(
f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
f''' reinstalling {pkg}.''' )
if not ops[op](version.parse(_UpperCamelCase ) , version.parse(_UpperCamelCase ) ):
raise ImportError(
f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase = None ) -> None:
"""simple docstring"""
snake_case_ : int = f'''\n{hint}''' if hint is not None else ''''''
# non-versioned check
if re.match(R'''^[\w_\-\d]+$''' , _UpperCamelCase ):
snake_case_ : int = requirement, None, None
else:
snake_case_ : Dict = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , _UpperCamelCase )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
f''' got {requirement}''' )
snake_case_ : Union[str, Any] = match[0]
snake_case_ : int = want_full.split(''',''' ) # there could be multiple requirements
snake_case_ : List[Any] = {}
for w in want_range:
snake_case_ : int = re.findall(R'''^([\s!=<>]{1,2})(.+)''' , _UpperCamelCase )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
f''' but got {requirement}''' )
snake_case_ : Tuple = match[0]
snake_case_ : Dict = want_ver
if op not in ops:
raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
snake_case_ : str = '''.'''.join([str(_UpperCamelCase ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return
# check if any version is installed
try:
snake_case_ : Dict = importlib.metadata.version(_UpperCamelCase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase ) -> List[str]:
"""simple docstring"""
snake_case_ : Any = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(_UpperCamelCase , _UpperCamelCase )
| 363 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
lowerCAmelCase_ = '''__DUMMY_TRANSFORMERS_USER__'''
lowerCAmelCase_ = '''Dummy User'''
lowerCAmelCase_ = '''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'''
lowerCAmelCase_ = '''https://hub-ci.huggingface.co'''
lowerCAmelCase_ = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
lowerCAmelCase_ = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
lowerCAmelCase_ = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
monkeypatch.setattr(
'''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''' , _UpperCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
monkeypatch.setattr('''datasets.config.HF_ENDPOINT''' , _UpperCamelCase )
monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''' , _UpperCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
"""simple docstring"""
monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''' , _UpperCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
"""simple docstring"""
HfFolder.save_token(_UpperCamelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope='''session''' )
def lowerCamelCase_ ( ) -> List[str]:
"""simple docstring"""
return HfApi(endpoint=_UpperCamelCase )
@pytest.fixture(scope='''session''' )
def lowerCamelCase_ ( _UpperCamelCase ) -> Dict:
"""simple docstring"""
snake_case_ : Union[str, Any] = HfFolder.get_token()
HfFolder.save_token(_UpperCamelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_UpperCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _UpperCamelCase ) -> List[str]:
"""simple docstring"""
def _cleanup_repo(_UpperCamelCase ):
hf_api.delete_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' )
return _cleanup_repo
@pytest.fixture
def lowerCamelCase_ ( _UpperCamelCase ) -> List[str]:
"""simple docstring"""
@contextmanager
def _temporary_repo(_UpperCamelCase ):
try:
yield repo_id
finally:
cleanup_repo(_UpperCamelCase )
return _temporary_repo
@pytest.fixture(scope='''session''' )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = f'''repo_txt_data-{int(time.time() * 10E3 )}'''
snake_case_ : Any = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' , private=_UpperCamelCase )
hf_api.upload_file(
token=_UpperCamelCase , path_or_fileobj=str(_UpperCamelCase ) , path_in_repo='''data/text_data.txt''' , repo_id=_UpperCamelCase , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
"""simple docstring"""
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : int = f'''repo_zipped_txt_data-{int(time.time() * 10E3 )}'''
snake_case_ : Tuple = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' , private=_UpperCamelCase )
hf_api.upload_file(
token=_UpperCamelCase , path_or_fileobj=str(_UpperCamelCase ) , path_in_repo='''data.zip''' , repo_id=_UpperCamelCase , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
"""simple docstring"""
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : List[str] = f'''repo_zipped_img_data-{int(time.time() * 10E3 )}'''
snake_case_ : str = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' , private=_UpperCamelCase )
hf_api.upload_file(
token=_UpperCamelCase , path_or_fileobj=str(_UpperCamelCase ) , path_in_repo='''data.zip''' , repo_id=_UpperCamelCase , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
"""simple docstring"""
return hf_private_dataset_repo_zipped_img_data_
| 279 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self : str , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : Optional[Any]=30 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=32 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : Union[str, Any]=37 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[int]=10 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Any=0.6 , __lowerCAmelCase : Optional[int]=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = mask_ratio
_UpperCAmelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : int ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = TFViTMAEModel(config=__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase , training=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = TFViTMAEForPreTraining(__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase , training=__lowerCAmelCase )
# expected sequence length = num_patches
_UpperCAmelCase = (self.image_size // self.patch_size) ** 2
_UpperCAmelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = TFViTMAEForPreTraining(__lowerCAmelCase )
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase = model(__lowerCAmelCase , training=__lowerCAmelCase )
_UpperCAmelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.prepare_config_and_inputs()
(_UpperCAmelCase) = config_and_inputs
_UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_snake_case : Optional[Any] = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
_snake_case : List[Any] = False
_snake_case : List[Any] = False
_snake_case : Optional[int] = False
_snake_case : str = False
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = TFViTMAEModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowerCAmelCase_ ( self : Dict ):
pass
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , tf.keras.layers.Layer ) )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__lowerCAmelCase )
_UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
# make the mask reproducible
np.random.seed(2 )
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__lowerCAmelCase )
_UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase , noise=__lowerCAmelCase )
_UpperCAmelCase = copy.deepcopy(self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCAmelCase = model(**__lowerCAmelCase , noise=__lowerCAmelCase )
_UpperCAmelCase = outputs_dict[0].numpy()
_UpperCAmelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def lowerCAmelCase_ ( self : List[str] ):
# make the mask reproducible
np.random.seed(2 )
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__lowerCAmelCase : int ):
_UpperCAmelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__lowerCAmelCase ):
_UpperCAmelCase = v.numpy()
else:
_UpperCAmelCase = np.array(__lowerCAmelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__lowerCAmelCase )
_UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = prepare_numpy_arrays(__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase , noise=__lowerCAmelCase )
_UpperCAmelCase = model(**__lowerCAmelCase , noise=__lowerCAmelCase )
self.assert_outputs_same(__lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
_UpperCAmelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_UpperCAmelCase = tf.constant(__lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_UpperCAmelCase = tf_noise
super().check_pt_tf_models(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
# make mask reproducible
np.random.seed(2 )
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__lowerCAmelCase )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(__lowerCAmelCase , __lowerCAmelCase ),)
if isinstance(__lowerCAmelCase , __lowerCAmelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__lowerCAmelCase , """_keras_serializable""" , __lowerCAmelCase )
}
_UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_UpperCAmelCase = tf.convert_to_tensor(__lowerCAmelCase )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
_UpperCAmelCase = main_layer_class(__lowerCAmelCase )
_UpperCAmelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_UpperCAmelCase = tf.keras.Model(__lowerCAmelCase , outputs=main_layer(__lowerCAmelCase ) )
_UpperCAmelCase = model(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(__lowerCAmelCase , """keras_model.h5""" )
model.save(__lowerCAmelCase )
_UpperCAmelCase = tf.keras.models.load_model(
__lowerCAmelCase , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__lowerCAmelCase , tf.keras.Model )
_UpperCAmelCase = model(__lowerCAmelCase )
self.assert_outputs_same(__lowerCAmelCase , __lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
# make mask reproducible
np.random.seed(2 )
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__lowerCAmelCase )
_UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase , noise=__lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
_UpperCAmelCase = outputs.last_hidden_state.numpy()
_UpperCAmelCase = 0
else:
_UpperCAmelCase = outputs.logits.numpy()
_UpperCAmelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase , saved_model=__lowerCAmelCase )
_UpperCAmelCase = model_class.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase , noise=__lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
_UpperCAmelCase = after_outputs['''last_hidden_state'''].numpy()
_UpperCAmelCase = 0
else:
_UpperCAmelCase = after_outputs['''logits'''].numpy()
_UpperCAmelCase = 0
_UpperCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCAmelCase , 1e-5 )
def lowerCAmelCase_ ( self : Optional[int] ):
# make mask reproducible
np.random.seed(2 )
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__lowerCAmelCase )
_UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase , noise=__lowerCAmelCase )
_UpperCAmelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__lowerCAmelCase )
_UpperCAmelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_UpperCAmelCase = model_class.from_config(model.config )
_UpperCAmelCase = new_model(__lowerCAmelCase ) # Build model
new_model.set_weights(model.get_weights() )
_UpperCAmelCase = new_model(__lowerCAmelCase , noise=__lowerCAmelCase )
self.assert_outputs_same(__lowerCAmelCase , __lowerCAmelCase )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(__lowerCAmelCase )
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCAmelCase_ ( self : str ):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def lowerCAmelCase_ ( self : str ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_UpperCAmelCase = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__lowerCAmelCase , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_UpperCAmelCase = ViTMAEConfig()
_UpperCAmelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
_UpperCAmelCase = model(**__lowerCAmelCase , noise=__lowerCAmelCase )
# verify the logits
_UpperCAmelCase = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
_UpperCAmelCase = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , __lowerCAmelCase , atol=1e-4 )
| 289 |
def _a ( UpperCAmelCase ) -> bool:
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
_A : str = int(input('Enter number: ').strip())
print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
| 142 | 0 |
"""simple docstring"""
import math
def lowercase__ ( snake_case_ :int ):
__UpperCAmelCase = []
__UpperCAmelCase = 2
__UpperCAmelCase = int(math.sqrt(snake_case_ ) ) # Size of every segment
__UpperCAmelCase = [True] * (end + 1)
__UpperCAmelCase = []
while start <= end:
if temp[start] is True:
in_prime.append(snake_case_ )
for i in range(start * start , end + 1 , snake_case_ ):
__UpperCAmelCase = False
start += 1
prime += in_prime
__UpperCAmelCase = end + 1
__UpperCAmelCase = min(2 * end , snake_case_ )
while low <= n:
__UpperCAmelCase = [True] * (high - low + 1)
for each in in_prime:
__UpperCAmelCase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(snake_case_ , high + 1 , snake_case_ ):
__UpperCAmelCase = False
for j in range(len(snake_case_ ) ):
if temp[j] is True:
prime.append(j + low )
__UpperCAmelCase = high + 1
__UpperCAmelCase = min(high + end , snake_case_ )
return prime
print(sieve(10**6))
| 352 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=_lowerCAmelCase ):
a__ : Union[str, Any] = ["onnx"]
def __init__( self : Any , *_lowercase : Dict , **_lowercase : Any ):
requires_backends(self , ['''onnx'''] )
@classmethod
def a ( cls : str , *_lowercase : List[Any] , **_lowercase : int ):
requires_backends(cls , ['''onnx'''] )
@classmethod
def a ( cls : Union[str, Any] , *_lowercase : List[str] , **_lowercase : Optional[int] ):
requires_backends(cls , ['''onnx'''] )
| 86 | 0 |
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowerCamelCase ( a_ : str , a_ : Tuple ) -> Any:
__SCREAMING_SNAKE_CASE :int = old_name
if "patch_embed" in old_name:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Union[str, Any] = old_name.split('''.''' )
if layer == "0":
__SCREAMING_SNAKE_CASE :List[str] = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
__SCREAMING_SNAKE_CASE :Optional[int] = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
__SCREAMING_SNAKE_CASE :Optional[Any] = old_name.replace('''3''' , '''convolution2''' )
else:
__SCREAMING_SNAKE_CASE :Optional[int] = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(r'''\d\.\d''' , a_ ):
__SCREAMING_SNAKE_CASE :Dict = r'''\b\d{2}\b'''
if bool(re.search(a_ , a_ ) ):
__SCREAMING_SNAKE_CASE :List[str] = re.search(r'''\d\.\d\d.''' , a_ ).group()
else:
__SCREAMING_SNAKE_CASE :Dict = re.search(r'''\d\.\d.''' , a_ ).group()
if int(match[0] ) < 6:
__SCREAMING_SNAKE_CASE :str = old_name.replace(a_ , '''''' )
__SCREAMING_SNAKE_CASE :Optional[Any] = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
__SCREAMING_SNAKE_CASE :Optional[Any] = '''intermediate_stages.''' + trimmed_name
else:
__SCREAMING_SNAKE_CASE :Any = old_name.replace(a_ , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
__SCREAMING_SNAKE_CASE :List[str] = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
__SCREAMING_SNAKE_CASE :List[Any] = str(int(match[2] ) - num_meta4D_last_stage )
__SCREAMING_SNAKE_CASE :Any = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
__SCREAMING_SNAKE_CASE :Any = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
__SCREAMING_SNAKE_CASE :str = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
__SCREAMING_SNAKE_CASE :Union[str, Any] = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
__SCREAMING_SNAKE_CASE :str = trimmed_name.replace('''fc2''' , '''linear_out''' )
__SCREAMING_SNAKE_CASE :List[Any] = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(r'''.\d.''' , a_ ):
__SCREAMING_SNAKE_CASE :List[str] = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
__SCREAMING_SNAKE_CASE :int = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__SCREAMING_SNAKE_CASE :str = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__SCREAMING_SNAKE_CASE :Union[str, Any] = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
__SCREAMING_SNAKE_CASE :Tuple = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
__SCREAMING_SNAKE_CASE :List[Any] = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
__SCREAMING_SNAKE_CASE :Tuple = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
__SCREAMING_SNAKE_CASE :Any = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__SCREAMING_SNAKE_CASE :int = new_name.replace('''norm''' , '''layernorm''' )
__SCREAMING_SNAKE_CASE :Any = '''efficientformer.''' + new_name
else:
__SCREAMING_SNAKE_CASE :Union[str, Any] = '''efficientformer.encoder.''' + new_name
return new_name
def __lowerCamelCase ( a_ : List[Any] , a_ : List[Any] ) -> Optional[Any]:
for key in checkpoint.copy().keys():
__SCREAMING_SNAKE_CASE :Optional[Any] = checkpoint.pop(a_ )
__SCREAMING_SNAKE_CASE :int = val
return checkpoint
def __lowerCamelCase ( ) -> Dict:
__SCREAMING_SNAKE_CASE :Dict = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__SCREAMING_SNAKE_CASE :Dict = Image.open(requests.get(a_ , stream=a_ ).raw )
return image
def __lowerCamelCase ( a_ : Path , a_ : Path , a_ : Path , a_ : bool ) -> Tuple:
__SCREAMING_SNAKE_CASE :Optional[int] = torch.load(a_ , map_location='''cpu''' )['''model''']
__SCREAMING_SNAKE_CASE :str = EfficientFormerConfig.from_json_file(a_ )
__SCREAMING_SNAKE_CASE :Tuple = EfficientFormerForImageClassificationWithTeacher(a_ )
__SCREAMING_SNAKE_CASE :Any = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
__SCREAMING_SNAKE_CASE :List[Any] = config.depths[-1] - config.num_metaad_blocks + 1
__SCREAMING_SNAKE_CASE :Any = convert_torch_checkpoint(a_ , a_ )
model.load_state_dict(a_ )
model.eval()
__SCREAMING_SNAKE_CASE :List[Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__SCREAMING_SNAKE_CASE :int = prepare_img()
__SCREAMING_SNAKE_CASE :int = 2_56
__SCREAMING_SNAKE_CASE :List[Any] = 2_24
__SCREAMING_SNAKE_CASE :List[Any] = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
__SCREAMING_SNAKE_CASE :int = processor(images=a_ , return_tensors='''pt''' ).pixel_values
# original processing pipeline
__SCREAMING_SNAKE_CASE :List[Any] = Compose(
[
Resize(a_ , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(a_ ),
ToTensor(),
Normalize(a_ , a_ ),
] )
__SCREAMING_SNAKE_CASE :List[Any] = image_transforms(a_ ).unsqueeze(0 )
assert torch.allclose(a_ , a_ )
__SCREAMING_SNAKE_CASE :str = model(a_ )
__SCREAMING_SNAKE_CASE :Optional[int] = outputs.logits
__SCREAMING_SNAKE_CASE :Optional[Any] = (1, 10_00)
if "l1" in model_name:
__SCREAMING_SNAKE_CASE :Any = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , a_ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__SCREAMING_SNAKE_CASE :str = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , a_ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__SCREAMING_SNAKE_CASE :int = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
# Save Checkpoints
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
print(f'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(a_ )
print(f'''Processor successfuly saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=a_ , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=a_ , )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
lowerCamelCase_ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
) | 191 |
"""simple docstring"""
def __lowerCamelCase ( a_ : Union[str, Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE :List[str] = 1
__SCREAMING_SNAKE_CASE :Dict = 2
while i * i <= n:
__SCREAMING_SNAKE_CASE :Tuple = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def __lowerCamelCase ( ) -> int:
__SCREAMING_SNAKE_CASE :Dict = 1
__SCREAMING_SNAKE_CASE :Dict = 1
while True:
i += 1
t_num += i
if count_divisors(a_ ) > 5_00:
break
return t_num
if __name__ == "__main__":
print(solution()) | 191 | 1 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _snake_case , _snake_case = True , _snake_case = None , _snake_case = 32 , _snake_case = True , _snake_case = 1 / 255 , _snake_case = True , _snake_case = True , _snake_case = [0.4814_5466, 0.457_8275, 0.4082_1073] , _snake_case = [0.2686_2954, 0.2613_0258, 0.2757_7711] , _snake_case = True , _snake_case=7 , _snake_case=30 , _snake_case=400 , _snake_case=3 , ) -> Any:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = do_resize
UpperCAmelCase = size if size is not None else {'''shortest_edge''': 288}
UpperCAmelCase = size_divisor
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = do_normalize
UpperCAmelCase = do_center_crop
UpperCAmelCase = image_mean
UpperCAmelCase = image_std
UpperCAmelCase = do_pad
UpperCAmelCase = batch_size
UpperCAmelCase = num_channels
UpperCAmelCase = min_resolution
UpperCAmelCase = max_resolution
def snake_case_ ( self ) -> str:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def snake_case_ ( self , _snake_case , _snake_case=False ) -> Dict:
"""simple docstring"""
if not batched:
UpperCAmelCase = self.size['''shortest_edge''']
UpperCAmelCase = image_inputs[0]
if isinstance(_lowercase , Image.Image ):
UpperCAmelCase , UpperCAmelCase = image.size
else:
UpperCAmelCase , UpperCAmelCase = image.shape[1], image.shape[2]
UpperCAmelCase = size / min(_lowercase , _lowercase )
if h < w:
UpperCAmelCase , UpperCAmelCase = size, scale * w
else:
UpperCAmelCase , UpperCAmelCase = scale * h, size
UpperCAmelCase = int((1333 / 800) * size )
if max(_lowercase , _lowercase ) > max_size:
UpperCAmelCase = max_size / max(_lowercase , _lowercase )
UpperCAmelCase = newh * scale
UpperCAmelCase = neww * scale
UpperCAmelCase , UpperCAmelCase = int(newh + 0.5 ), int(neww + 0.5 )
UpperCAmelCase , UpperCAmelCase = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase = []
for image in image_inputs:
UpperCAmelCase , UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase = max(_lowercase , key=lambda _snake_case : item[0] )[0]
UpperCAmelCase = max(_lowercase , key=lambda _snake_case : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BridgeTowerImageProcessor if is_vision_available() else None
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = BridgeTowerImageProcessingTester(self )
@property
def snake_case_ ( self ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowercase , '''image_std''' ) )
self.assertTrue(hasattr(_lowercase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowercase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowercase , '''size''' ) )
self.assertTrue(hasattr(_lowercase , '''size_divisor''' ) )
def snake_case_ ( self ) -> Any:
"""simple docstring"""
pass
def snake_case_ ( self ) -> Tuple:
"""simple docstring"""
# Initialize image processor
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self ) -> int:
"""simple docstring"""
# Initialize image processor
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
# Initialize image processor
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 366 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowercase ( TensorFormatter[Mapping, """torch.Tensor""", Mapping] ):
'''simple docstring'''
def __init__( self , _snake_case=None , **_snake_case ) -> int:
"""simple docstring"""
super().__init__(features=_snake_case )
UpperCAmelCase = torch_tensor_kwargs
import torch # noqa import torch at initialization
def snake_case_ ( self , _snake_case ) -> Union[str, Any]:
"""simple docstring"""
import torch
if isinstance(_snake_case , _snake_case ) and column:
if all(
isinstance(_snake_case , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(_snake_case )
return column
def snake_case_ ( self , _snake_case ) -> Optional[int]:
"""simple docstring"""
import torch
if isinstance(_snake_case , (str, bytes, type(_snake_case )) ):
return value
elif isinstance(_snake_case , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
UpperCAmelCase = {}
if isinstance(_snake_case , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
UpperCAmelCase = {'''dtype''': torch.intaa}
elif isinstance(_snake_case , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
UpperCAmelCase = {'''dtype''': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_snake_case , PIL.Image.Image ):
UpperCAmelCase = np.asarray(_snake_case )
return torch.tensor(_snake_case , **{**default_dtype, **self.torch_tensor_kwargs} )
def snake_case_ ( self , _snake_case ) -> Optional[Any]:
"""simple docstring"""
import torch
# support for torch, tf, jax etc.
if hasattr(_snake_case , '''__array__''' ) and not isinstance(_snake_case , torch.Tensor ):
UpperCAmelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_snake_case , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(_snake_case ) for substruct in data_struct] )
elif isinstance(_snake_case , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(_snake_case ) for substruct in data_struct] )
return self._tensorize(_snake_case )
def snake_case_ ( self , _snake_case ) -> List[Any]:
"""simple docstring"""
return map_nested(self._recursive_tensorize , _snake_case , map_list=_snake_case )
def snake_case_ ( self , _snake_case ) -> Mapping:
"""simple docstring"""
UpperCAmelCase = self.numpy_arrow_extractor().extract_row(_snake_case )
UpperCAmelCase = self.python_features_decoder.decode_row(_snake_case )
return self.recursive_tensorize(_snake_case )
def snake_case_ ( self , _snake_case ) -> "torch.Tensor":
"""simple docstring"""
UpperCAmelCase = self.numpy_arrow_extractor().extract_column(_snake_case )
UpperCAmelCase = self.python_features_decoder.decode_column(_snake_case , pa_table.column_names[0] )
UpperCAmelCase = self.recursive_tensorize(_snake_case )
UpperCAmelCase = self._consolidate(_snake_case )
return column
def snake_case_ ( self , _snake_case ) -> Mapping:
"""simple docstring"""
UpperCAmelCase = self.numpy_arrow_extractor().extract_batch(_snake_case )
UpperCAmelCase = self.python_features_decoder.decode_batch(_snake_case )
UpperCAmelCase = self.recursive_tensorize(_snake_case )
for column_name in batch:
UpperCAmelCase = self._consolidate(batch[column_name] )
return batch
| 152 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase__ = {
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
UpperCamelCase__ = {'mobilebert-uncased': 5_1_2}
UpperCamelCase__ = {}
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : str = VOCAB_FILES_NAMES
__UpperCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Any = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Dict = MobileBertTokenizer
def __init__(self : List[str] , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : str=None , __UpperCAmelCase : str=True , __UpperCAmelCase : Optional[int]="[UNK]" , __UpperCAmelCase : List[str]="[SEP]" , __UpperCAmelCase : Tuple="[PAD]" , __UpperCAmelCase : int="[CLS]" , __UpperCAmelCase : Tuple="[MASK]" , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : str=None , **__UpperCAmelCase : Tuple , ) -> Tuple:
"""simple docstring"""
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , )
UpperCAmelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , __UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , __UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , __UpperCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase__ = getattr(__UpperCAmelCase , normalizer_state.pop("type" ) )
UpperCAmelCase__ = do_lower_case
UpperCAmelCase__ = strip_accents
UpperCAmelCase__ = tokenize_chinese_chars
UpperCAmelCase__ = normalizer_class(**__UpperCAmelCase )
UpperCAmelCase__ = do_lower_case
def lowercase_ (self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any]=None ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase_ (self : List[str] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase_ (self : str , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase__ = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
| 65 | from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Optional[torch.FloatTensor] = None
__UpperCAmelCase : torch.FloatTensor = None
__UpperCAmelCase : Optional[Tuple[torch.FloatTensor]] = None
__UpperCAmelCase : Optional[Tuple[torch.FloatTensor]] = None
class A ( UpperCAmelCase_ ):
def __init__(self : Union[str, Any] , __UpperCAmelCase : Tuple=1 , __UpperCAmelCase : str=0 , __UpperCAmelCase : str=2 , __UpperCAmelCase : Union[str, Any]=5_1_2 , __UpperCAmelCase : List[str]="cls" , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : str=True , **__UpperCAmelCase : str , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
UpperCAmelCase__ = project_dim
UpperCAmelCase__ = pooler_fn
UpperCAmelCase__ = learn_encoder
UpperCAmelCase__ = use_attention_mask
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Tuple = [r'pooler', r'logit_scale']
__UpperCAmelCase : int = [r'position_ids', r'predictions.decoder.bias']
__UpperCAmelCase : Any = 'roberta'
__UpperCAmelCase : List[str] = RobertaSeriesConfig
def __init__(self : Tuple , __UpperCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
super().__init__(__UpperCAmelCase )
UpperCAmelCase__ = XLMRobertaModel(__UpperCAmelCase )
UpperCAmelCase__ = nn.Linear(config.hidden_size , config.project_dim )
UpperCAmelCase__ = getattr(__UpperCAmelCase , "has_pre_transformation" , __UpperCAmelCase )
if self.has_pre_transformation:
UpperCAmelCase__ = nn.Linear(config.hidden_size , config.project_dim )
UpperCAmelCase__ = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def lowercase_ (self : Optional[Any] , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ = self.base_model(
input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , position_ids=__UpperCAmelCase , head_mask=__UpperCAmelCase , inputs_embeds=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_attentions=__UpperCAmelCase , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__UpperCAmelCase , )
if self.has_pre_transformation:
UpperCAmelCase__ = outputs["hidden_states"][-2]
UpperCAmelCase__ = self.pre_LN(__UpperCAmelCase )
UpperCAmelCase__ = self.transformation_pre(__UpperCAmelCase )
return TransformationModelOutput(
projection_state=__UpperCAmelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
UpperCAmelCase__ = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=__UpperCAmelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 65 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 'ctrl'
lowerCAmelCase__ = ['past_key_values']
lowerCAmelCase__ = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , lowercase=246534 , lowercase=256 , lowercase=1280 , lowercase=8192 , lowercase=48 , lowercase=16 , lowercase=0.1 , lowercase=0.1 , lowercase=1e-6 , lowercase=0.0_2 , lowercase=True , **lowercase , ) -> Tuple:
lowerCamelCase_ = vocab_size
lowerCamelCase_ = n_positions
lowerCamelCase_ = n_embd
lowerCamelCase_ = n_layer
lowerCamelCase_ = n_head
lowerCamelCase_ = dff
lowerCamelCase_ = resid_pdrop
lowerCamelCase_ = embd_pdrop
lowerCamelCase_ = layer_norm_epsilon
lowerCamelCase_ = initializer_range
lowerCamelCase_ = use_cache
super().__init__(**lowercase )
| 47 |
from sklearn.metrics import recall_score
import datasets
__A ='''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
__A ='''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
__A ='''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=None , lowercase=1 , lowercase="binary" , lowercase=None , lowercase="warn" , ) -> Optional[int]:
lowerCamelCase_ = recall_score(
lowercase , lowercase , labels=lowercase , pos_label=lowercase , average=lowercase , sample_weight=lowercase , zero_division=lowercase , )
return {"recall": float(lowercase ) if score.size == 1 else score}
| 47 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowercase_ = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 211 | '''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__lowercase = logging.get_logger(__name__)
__lowercase = '''T5Config'''
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''mt5'''
UpperCAmelCase_ : Tuple = MTaConfig
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = '''mt5'''
UpperCAmelCase_ : int = MTaConfig
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '''mt5'''
UpperCAmelCase_ : Union[str, Any] = MTaConfig
| 272 | 0 |
from collections import deque
def a( A : str ) -> List[Any]:
"""simple docstring"""
a = len(A )
a = deque()
a = [False for _ in range(A )]
a = [-1 for _ in range(A )]
a = index_of[:]
def strong_connect(A : List[str] , A : int , A : Optional[int] ):
a = index # the number when this node is seen
a = index # lowest rank node reachable from here
index += 1
stack.append(A )
a = True
for w in g[v]:
if index_of[w] == -1:
a = strong_connect(A , A , A )
a = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
a = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
a = []
a = stack.pop()
a = False
component.append(A )
while w != v:
a = stack.pop()
a = False
component.append(A )
components.append(A )
return index
a = []
for v in range(A ):
if index_of[v] == -1:
strong_connect(A , 0 , A )
return components
def a( A : Optional[Any] , A : Dict ) -> List[str]:
"""simple docstring"""
a = [[] for _ in range(A )]
for u, v in edges:
g[u].append(A )
return g
if __name__ == "__main__":
# Test
_lowercase: Dict = 7
_lowercase: Dict = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_lowercase: int = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_lowercase: Union[str, Any] = [(u, v) for u, v in zip(source, target)]
_lowercase: List[str] = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 71 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase: Optional[Any] = logging.get_logger(__name__)
_lowercase: Any = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = "git_vision_model"
def __init__(self , lowerCamelCase_=768 , lowerCamelCase_=3072 , lowerCamelCase_=12 , lowerCamelCase_=12 , lowerCamelCase_=3 , lowerCamelCase_=224 , lowerCamelCase_=16 , lowerCamelCase_="quick_gelu" , lowerCamelCase_=1E-5 , lowerCamelCase_=0.0 , lowerCamelCase_=0.02 , **lowerCamelCase_ , ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
a = hidden_size
a = intermediate_size
a = num_hidden_layers
a = num_attention_heads
a = num_channels
a = patch_size
a = image_size
a = initializer_range
a = attention_dropout
a = layer_norm_eps
a = hidden_act
@classmethod
def UpperCamelCase_ (cls , lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
cls._set_token_in_kwargs(lowerCamelCase_ )
a , a = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("model_type" ) == "git":
a = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ )
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = "git"
def __init__(self , lowerCamelCase_=None , lowerCamelCase_=30522 , lowerCamelCase_=768 , lowerCamelCase_=6 , lowerCamelCase_=12 , lowerCamelCase_=3072 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=1024 , lowerCamelCase_=0.02 , lowerCamelCase_=1E-1_2 , lowerCamelCase_=0 , lowerCamelCase_="absolute" , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=101 , lowerCamelCase_=102 , lowerCamelCase_=None , **lowerCamelCase_ , ):
"""simple docstring"""
super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
if vision_config is None:
a = {}
logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
a = GitVisionConfig(**lowerCamelCase_ )
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = hidden_act
a = intermediate_size
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = initializer_range
a = layer_norm_eps
a = position_embedding_type
a = use_cache
a = tie_word_embeddings
a = num_image_with_embedding
a = bos_token_id
a = eos_token_id
def UpperCamelCase_ (self ):
"""simple docstring"""
a = copy.deepcopy(self.__dict__ )
a = self.vision_config.to_dict()
a = self.__class__.model_type
return output
| 71 | 1 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class __lowercase :
"""simple docstring"""
_UpperCAmelCase : float
_UpperCAmelCase : TreeNode | None = None
_UpperCAmelCase : TreeNode | None = None
def A_ ( _UpperCAmelCase ):
# Validation
def is_valid_tree(_UpperCAmelCase ) -> bool:
if node is None:
return True
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(_UpperCAmelCase ):
raise ValueError(
"Each node should be type of TreeNode and data should be float." )
def is_binary_search_tree_recursive_check(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , _UpperCAmelCase , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , _UpperCAmelCase )
)
return is_binary_search_tree_recursive_check(_UpperCAmelCase , -float("inf" ) , float("inf" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] =["image_processor", "tokenizer"]
UpperCAmelCase_ : Tuple ="FlavaImageProcessor"
UpperCAmelCase_ : List[Any] =("BertTokenizer", "BertTokenizerFast")
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> int:
'''simple docstring'''
__snake_case : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCAmelCase , )
__snake_case : List[Any] = kwargs.pop("feature_extractor" )
__snake_case : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCAmelCase , UpperCAmelCase )
__snake_case : Tuple = self.image_processor
def __call__( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = None , **UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
__snake_case : Union[str, Any] = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
if images is not None:
__snake_case : Union[str, Any] = self.image_processor(
UpperCAmelCase , return_image_mask=UpperCAmelCase , return_codebook_pixels=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
if text is not None and images is not None:
encoding.update(UpperCAmelCase )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase )
def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> str:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case : List[Any] = self.tokenizer.model_input_names
__snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase , )
return self.image_processor
| 326 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
UpperCAmelCase = False
UpperCAmelCase = False
def _snake_case ( _SCREAMING_SNAKE_CASE : Namespace ) -> Tuple:
"""simple docstring"""
return TrainCommand(_SCREAMING_SNAKE_CASE )
class __snake_case( _lowerCAmelCase ):
'''simple docstring'''
@staticmethod
def __snake_case ( A_ ) -> Optional[int]:
lowerCAmelCase = parser.add_parser("""train""" , help="""CLI tool to train a model on a task.""" )
train_parser.add_argument(
"""--train_data""" , type=A_ , required=A_ , help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" , )
train_parser.add_argument(
"""--column_label""" , type=A_ , default=0 , help="""Column of the dataset csv file with example labels.""" )
train_parser.add_argument(
"""--column_text""" , type=A_ , default=1 , help="""Column of the dataset csv file with example texts.""" )
train_parser.add_argument(
"""--column_id""" , type=A_ , default=2 , help="""Column of the dataset csv file with example ids.""" )
train_parser.add_argument(
"""--skip_first_row""" , action="""store_true""" , help="""Skip the first row of the csv file (headers).""" )
train_parser.add_argument("""--validation_data""" , type=A_ , default="""""" , help="""path to validation dataset.""" )
train_parser.add_argument(
"""--validation_split""" , type=A_ , default=0.1 , help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" , )
train_parser.add_argument("""--output""" , type=A_ , default="""./""" , help="""path to saved the trained model.""" )
train_parser.add_argument(
"""--task""" , type=A_ , default="""text_classification""" , help="""Task to train the model on.""" )
train_parser.add_argument(
"""--model""" , type=A_ , default="""bert-base-uncased""" , help="""Model's name or path to stored model.""" )
train_parser.add_argument("""--train_batch_size""" , type=A_ , default=32 , help="""Batch size for training.""" )
train_parser.add_argument("""--valid_batch_size""" , type=A_ , default=64 , help="""Batch size for validation.""" )
train_parser.add_argument("""--learning_rate""" , type=A_ , default=3e-5 , help="""Learning rate.""" )
train_parser.add_argument("""--adam_epsilon""" , type=A_ , default=1e-08 , help="""Epsilon for Adam optimizer.""" )
train_parser.set_defaults(func=A_ )
def __init__( self , A_ ) -> Tuple:
lowerCAmelCase = logging.get_logger("""transformers-cli/training""" )
lowerCAmelCase = """tf""" if is_tf_available() else """torch"""
os.makedirs(args.output , exist_ok=A_ )
lowerCAmelCase = args.output
lowerCAmelCase = args.column_label
lowerCAmelCase = args.column_text
lowerCAmelCase = args.column_id
self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
lowerCAmelCase = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'Loading dataset from {args.train_data}' )
lowerCAmelCase = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowerCAmelCase = None
if args.validation_data:
self.logger.info(f'Loading validation dataset from {args.validation_data}' )
lowerCAmelCase = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowerCAmelCase = args.validation_split
lowerCAmelCase = args.train_batch_size
lowerCAmelCase = args.valid_batch_size
lowerCAmelCase = args.learning_rate
lowerCAmelCase = args.adam_epsilon
def __snake_case ( self ) -> Optional[int]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __snake_case ( self ) -> Tuple:
raise NotImplementedError
def __snake_case ( self ) -> Tuple:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output ) | 187 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __snake_case( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self , A_ ) -> str:
lowerCAmelCase = 3
lowerCAmelCase = 250
lowerCAmelCase = ids_tensor((batch_size, length) , A_ )
lowerCAmelCase = torch.ones((batch_size, length) , device=A_ , dtype=torch.float ) / length
return input_ids, scores
def __snake_case ( self ) -> Any:
lowerCAmelCase, lowerCAmelCase = self._get_tensors(5 )
lowerCAmelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(A_ , A_ ) )
lowerCAmelCase, lowerCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(A_ , A_ ) )
lowerCAmelCase, lowerCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(A_ , A_ ) )
def __snake_case ( self ) -> Optional[int]:
lowerCAmelCase = MaxLengthCriteria(max_length=10 )
lowerCAmelCase, lowerCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(A_ , A_ ) )
lowerCAmelCase, lowerCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(A_ , A_ ) )
lowerCAmelCase, lowerCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(A_ , A_ ) )
def __snake_case ( self ) -> List[Any]:
lowerCAmelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
lowerCAmelCase, lowerCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(A_ , A_ ) )
lowerCAmelCase, lowerCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(A_ , A_ ) )
lowerCAmelCase, lowerCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(A_ , A_ ) )
lowerCAmelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def __snake_case ( self ) -> List[str]:
lowerCAmelCase, lowerCAmelCase = self._get_tensors(5 )
lowerCAmelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(A_ , A_ ) )
lowerCAmelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(A_ , A_ ) )
def __snake_case ( self ) -> Optional[int]:
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(A_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
lowerCAmelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(A_ ) , 1 ) | 187 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Tuple ={
'''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
'''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple =['''DebertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] =[
'''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DebertaForMaskedLM''',
'''DebertaForQuestionAnswering''',
'''DebertaForSequenceClassification''',
'''DebertaForTokenClassification''',
'''DebertaModel''',
'''DebertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Union[str, Any] =[
'''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDebertaForMaskedLM''',
'''TFDebertaForQuestionAnswering''',
'''TFDebertaForSequenceClassification''',
'''TFDebertaForTokenClassification''',
'''TFDebertaModel''',
'''TFDebertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 223 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('''Googling.....''')
lowerCAmelCase : str ='''https://www.google.com/search?q=''' + ''' '''.join(sys.argv[1:])
lowerCAmelCase : List[str] =requests.get(url, headers={'''UserAgent''': UserAgent().random})
# res.raise_for_status()
with open('''project1a.html''', '''wb''') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
lowerCAmelCase : List[Any] =BeautifulSoup(res.text, '''html.parser''')
lowerCAmelCase : List[Any] =list(soup.select('''.eZt8xd'''))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('''href'''))
else:
webbrowser.open(F'''https://google.com{link.get('href')}''')
| 223 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case_ : List[str] = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[Any] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
snake_case_ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 351 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __snake_case ( unittest.TestCase , a ):
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = load_tool('''text-to-speech''')
self.tool.setup()
def lowerCamelCase ( self : int):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = self.tool('''hey''')
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
def lowerCamelCase ( self : Any):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = self.tool('''hey''')
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
| 7 | 0 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
UpperCAmelCase : str = False
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Any ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger """
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pipe(
prompt=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = VersatileDiffusionTextToImagePipeline.from_pretrained(__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = generator.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pipe(
prompt=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = VersatileDiffusionTextToImagePipeline.from_pretrained(
"""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger """
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pipe(
prompt=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
__SCREAMING_SNAKE_CASE = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 267 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
UpperCAmelCase : Tuple = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def a__ ( a__ , a__ , a__ , a__ , a__=False , a__=True ):
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(F'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__SCREAMING_SNAKE_CASE = cached_file(a__ , a__ , force_download=not use_cached_models )
__SCREAMING_SNAKE_CASE = config_class.from_json_file(a__ )
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
print(F'Building TensorFlow model from configuration: {config}' )
__SCREAMING_SNAKE_CASE = model_class(a__ )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
__SCREAMING_SNAKE_CASE = cached_file(
a__ , a__ , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
__SCREAMING_SNAKE_CASE = load_pytorch_checkpoint_in_tfa_model(a__ , a__ )
if compare_with_pt_model:
__SCREAMING_SNAKE_CASE = tf_model(tf_model.dummy_inputs , training=a__ ) # build the network
__SCREAMING_SNAKE_CASE = torch.load(a__ , map_location="""cpu""" )
__SCREAMING_SNAKE_CASE = pt_model_class.from_pretrained(
pretrained_model_name_or_path=a__ , config=a__ , state_dict=a__ )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = pt_model(**pt_model.dummy_inputs )
__SCREAMING_SNAKE_CASE = pto[0].numpy()
__SCREAMING_SNAKE_CASE = tfo[0].numpy()
__SCREAMING_SNAKE_CASE = np.amax(np.abs(np_pt - np_tf ) )
print(F'Max absolute difference between models outputs {diff}' )
assert diff <= 2E-2, F'Error, model absolute difference is >2e-2: {diff}'
# Save pytorch-model
print(F'Save TensorFlow model to {tf_dump_path}' )
tf_model.save_weights(a__ , save_format="""h5""" )
def a__ ( a__ , a__ , a__=None , a__=None , a__=False , a__=False , a__=False , a__=False , ):
"""simple docstring"""
if args_model_type is None:
__SCREAMING_SNAKE_CASE = list(MODEL_CLASSES.keys() )
else:
__SCREAMING_SNAKE_CASE = [args_model_type]
for j, model_type in enumerate(a__ , start=1 ):
print("""=""" * 1_00 )
print(F' Converting model type {j}/{len(a__ )}: {model_type}' )
print("""=""" * 1_00 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__SCREAMING_SNAKE_CASE = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__SCREAMING_SNAKE_CASE = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(a__ , a__ ) , start=1 ):
print("""-""" * 1_00 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F' Skipping finetuned checkpoint {model_shortcut_name}' )
continue
__SCREAMING_SNAKE_CASE = model_shortcut_name
elif only_convert_finetuned_models:
print(F' Skipping not finetuned checkpoint {model_shortcut_name}' )
continue
print(
F' Converting checkpoint {i}/{len(a__ )}: {model_shortcut_name} - model_type {model_type}' )
print("""-""" * 1_00 )
if config_shortcut_name in aws_config_map:
__SCREAMING_SNAKE_CASE = cached_file(a__ , a__ , force_download=not use_cached_models )
else:
__SCREAMING_SNAKE_CASE = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__SCREAMING_SNAKE_CASE = cached_file(a__ , a__ , force_download=not use_cached_models )
else:
__SCREAMING_SNAKE_CASE = model_shortcut_name
if os.path.isfile(a__ ):
__SCREAMING_SNAKE_CASE = """converted_model"""
convert_pt_checkpoint_to_tf(
model_type=a__ , pytorch_checkpoint_path=a__ , config_file=a__ , tf_dump_path=os.path.join(a__ , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=a__ , )
if remove_cached_files:
os.remove(a__ )
os.remove(a__ )
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
UpperCAmelCase : List[Any] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 267 | 1 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class __magic_name__ ( tf.keras.layers.Layer ):
def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case=1 , snake_case=False , **snake_case) -> Optional[Any]:
'''simple docstring'''
super().__init__(**snake_case)
_UpperCAmelCase : Dict =vocab_size
_UpperCAmelCase : Any =d_embed
_UpperCAmelCase : Optional[Any] =d_proj
_UpperCAmelCase : List[str] =cutoffs + [vocab_size]
_UpperCAmelCase : Dict =[0] + self.cutoffs
_UpperCAmelCase : Optional[int] =div_val
_UpperCAmelCase : List[Any] =self.cutoffs[0]
_UpperCAmelCase : Any =len(self.cutoffs) - 1
_UpperCAmelCase : Tuple =self.shortlist_size + self.n_clusters
_UpperCAmelCase : Union[str, Any] =keep_order
_UpperCAmelCase : List[Any] =[]
_UpperCAmelCase : Optional[int] =[]
def lowerCAmelCase ( self , snake_case) -> Tuple:
'''simple docstring'''
if self.n_clusters > 0:
_UpperCAmelCase : List[Any] =self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=snake_case , name='cluster_weight')
_UpperCAmelCase : Optional[int] =self.add_weight(
shape=(self.n_clusters,) , initializer='zeros' , trainable=snake_case , name='cluster_bias')
if self.div_val == 1:
for i in range(len(self.cutoffs)):
if self.d_proj != self.d_embed:
_UpperCAmelCase : Union[str, Any] =self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=snake_case , name=f"out_projs_._{i}" , )
self.out_projs.append(snake_case)
else:
self.out_projs.append(snake_case)
_UpperCAmelCase : List[Any] =self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=snake_case , name=f"out_layers_._{i}_._weight" , )
_UpperCAmelCase : str =self.add_weight(
shape=(self.vocab_size,) , initializer='zeros' , trainable=snake_case , name=f"out_layers_._{i}_._bias" , )
self.out_layers.append((weight, bias))
else:
for i in range(len(self.cutoffs)):
_UpperCAmelCase : Any =self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase : Optional[Any] =self.d_embed // (self.div_val**i)
_UpperCAmelCase : Optional[int] =self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=snake_case , name=f"out_projs_._{i}")
self.out_projs.append(snake_case)
_UpperCAmelCase : Union[str, Any] =self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=snake_case , name=f"out_layers_._{i}_._weight" , )
_UpperCAmelCase : str =self.add_weight(
shape=(r_idx - l_idx,) , initializer='zeros' , trainable=snake_case , name=f"out_layers_._{i}_._bias" , )
self.out_layers.append((weight, bias))
super().build(snake_case)
@staticmethod
def lowerCAmelCase ( snake_case , snake_case , snake_case , snake_case=None) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple =x
if proj is not None:
_UpperCAmelCase : Dict =tf.einsum('ibd,ed->ibe' , snake_case , snake_case)
return tf.einsum('ibd,nd->ibn' , snake_case , snake_case) + b
@staticmethod
def lowerCAmelCase ( snake_case , snake_case) -> Dict:
'''simple docstring'''
_UpperCAmelCase : str =shape_list(snake_case)
_UpperCAmelCase : Union[str, Any] =tf.range(lp_size[0] , dtype=target.dtype)
_UpperCAmelCase : List[Any] =tf.stack([r, target] , 1)
return tf.gather_nd(snake_case , snake_case)
def lowerCAmelCase ( self , snake_case , snake_case , snake_case=True , snake_case=False) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Tuple =0
if self.n_clusters == 0:
_UpperCAmelCase : Optional[int] =self._logit(snake_case , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0])
if target is not None:
_UpperCAmelCase : Union[str, Any] =tf.nn.sparse_softmax_cross_entropy_with_logits(labels=snake_case , logits=snake_case)
_UpperCAmelCase : Optional[Any] =tf.nn.log_softmax(snake_case , axis=-1)
else:
_UpperCAmelCase : List[Any] =shape_list(snake_case)
_UpperCAmelCase : Optional[Any] =[]
_UpperCAmelCase : List[Any] =tf.zeros(hidden_sizes[:2])
for i in range(len(self.cutoffs)):
_UpperCAmelCase : Optional[int] =self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
_UpperCAmelCase : List[Any] =(target >= l_idx) & (target < r_idx)
_UpperCAmelCase : int =tf.where(snake_case)
_UpperCAmelCase : List[str] =tf.boolean_mask(snake_case , snake_case) - l_idx
if self.div_val == 1:
_UpperCAmelCase : Tuple =self.out_layers[0][0][l_idx:r_idx]
_UpperCAmelCase : str =self.out_layers[0][1][l_idx:r_idx]
else:
_UpperCAmelCase : Optional[int] =self.out_layers[i][0]
_UpperCAmelCase : str =self.out_layers[i][1]
if i == 0:
_UpperCAmelCase : Tuple =tf.concat([cur_W, self.cluster_weight] , 0)
_UpperCAmelCase : Optional[Any] =tf.concat([cur_b, self.cluster_bias] , 0)
_UpperCAmelCase : str =self._logit(snake_case , snake_case , snake_case , self.out_projs[0])
_UpperCAmelCase : Union[str, Any] =tf.nn.log_softmax(snake_case)
out.append(head_logprob[..., : self.cutoffs[0]])
if target is not None:
_UpperCAmelCase : Optional[Any] =tf.boolean_mask(snake_case , snake_case)
_UpperCAmelCase : List[str] =self._gather_logprob(snake_case , snake_case)
else:
_UpperCAmelCase : List[str] =self._logit(snake_case , snake_case , snake_case , self.out_projs[i])
_UpperCAmelCase : Any =tf.nn.log_softmax(snake_case)
_UpperCAmelCase : List[Any] =self.cutoffs[0] + i - 1 # No probability for the head cluster
_UpperCAmelCase : int =head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(snake_case)
if target is not None:
_UpperCAmelCase : Any =tf.boolean_mask(snake_case , snake_case)
_UpperCAmelCase : Any =tf.boolean_mask(snake_case , snake_case)
_UpperCAmelCase : Union[str, Any] =self._gather_logprob(snake_case , snake_case)
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(snake_case , -cur_logprob , shape_list(snake_case))
_UpperCAmelCase : Dict =tf.concat(snake_case , axis=-1)
if target is not None:
if return_mean:
_UpperCAmelCase : Optional[int] =tf.reduce_mean(snake_case)
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(snake_case)
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(snake_case , name=self.name , aggregation='mean' if return_mean else '')
return out
| 365 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase__ ( __lowerCamelCase : list[int] ):
'''simple docstring'''
if not nums:
return 0
_UpperCAmelCase : Tuple =nums[0]
_UpperCAmelCase : int =0
for num in nums[1:]:
_UpperCAmelCase , _UpperCAmelCase : Optional[int] =(
max_excluding + num,
max(__lowerCamelCase , __lowerCamelCase ),
)
return max(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 242 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : str = '''visual_bert'''
def __init__( self , _UpperCamelCase=3_0_5_2_2 , _UpperCamelCase=7_6_8 , _UpperCamelCase=5_1_2 , _UpperCamelCase=1_2 , _UpperCamelCase=1_2 , _UpperCamelCase=3_0_7_2 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=5_1_2 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=1E-12 , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , **_UpperCamelCase , ) -> List[Any]:
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Any = visual_embedding_dim
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = initializer_range
UpperCAmelCase_ : Union[str, Any] = type_vocab_size
UpperCAmelCase_ : List[Any] = layer_norm_eps
UpperCAmelCase_ : Any = bypass_transformer
UpperCAmelCase_ : Union[str, Any] = special_visual_initialize
| 29 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : int = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='gpt_bigcode'
__a =['past_key_values']
__a ={
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Optional[Any] , __a : Tuple=5_02_57 , __a : str=10_24 , __a : Dict=7_68 , __a : Tuple=12 , __a : str=12 , __a : Optional[int]=None , __a : Dict="gelu_pytorch_tanh" , __a : Tuple=0.1 , __a : Tuple=0.1 , __a : Union[str, Any]=0.1 , __a : Tuple=1e-5 , __a : str=0.02 , __a : Dict=True , __a : Union[str, Any]=True , __a : Optional[int]=5_02_56 , __a : Optional[int]=5_02_56 , __a : Union[str, Any]=True , __a : Dict=True , __a : Union[str, Any]=True , **__a : List[Any] , ):
_a = vocab_size
_a = n_positions
_a = n_embd
_a = n_layer
_a = n_head
_a = n_inner
_a = activation_function
_a = resid_pdrop
_a = embd_pdrop
_a = attn_pdrop
_a = layer_norm_epsilon
_a = initializer_range
_a = scale_attn_weights
_a = use_cache
_a = attention_softmax_in_fpaa
_a = scale_attention_softmax_in_fpaa
_a = multi_query
_a = bos_token_id
_a = eos_token_id
super().__init__(bos_token_id=__a , eos_token_id=__a , **__a )
| 63 | 0 |
'''simple docstring'''
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__lowerCAmelCase = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __magic_name__ ( datasets.BuilderConfig ):
lowerCAmelCase : Optional[datasets.Features] = None
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , ) -> Optional[int]:
import pyspark
def generate_fn():
_a : str = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
_a : List[Any] = df_with_partition_id.select('*' ).where(f"""part_id = {partition_id}""" ).drop('part_id' )
_a : Optional[int] = partition_df.collect()
_a : Any = 0
for row in rows:
yield f"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class __magic_name__ ( _BaseExamplesIterable ):
def __init__( self : Dict ,_UpperCAmelCase : "pyspark.sql.DataFrame" ,_UpperCAmelCase : Any=None ,):
_a : List[str] = df
_a : str = partition_order or range(self.df.rdd.getNumPartitions() )
_a : Union[str, Any] = _generate_iterable_examples(self.df ,self.partition_order )
def __iter__( self : Dict ):
yield from self.generate_examples_fn()
def __lowercase ( self : str ,_UpperCAmelCase : np.random.Generator ):
_a : int = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(_UpperCAmelCase )
return SparkExamplesIterable(self.df ,partition_order=_UpperCAmelCase )
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : int ,_UpperCAmelCase : int ):
_a : Union[str, Any] = self.split_shard_indices_by_worker(_UpperCAmelCase ,_UpperCAmelCase )
return SparkExamplesIterable(self.df ,partition_order=_UpperCAmelCase )
@property
def __lowercase ( self : Union[str, Any] ):
return len(self.partition_order )
class __magic_name__ ( datasets.DatasetBuilder ):
lowerCAmelCase : Optional[Any] = SparkConfig
def __init__( self : List[Any] ,_UpperCAmelCase : "pyspark.sql.DataFrame" ,_UpperCAmelCase : str = None ,_UpperCAmelCase : str = None ,**_UpperCAmelCase : int ,):
import pyspark
_a : Dict = pyspark.sql.SparkSession.builder.getOrCreate()
_a : int = df
_a : int = working_dir
super().__init__(
cache_dir=_UpperCAmelCase ,config_name=str(self.df.semanticHash() ) ,**_UpperCAmelCase ,)
def __lowercase ( self : int ):
# Returns the path of the created file.
def create_cache_and_write_probe(_UpperCAmelCase : int ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir ,exist_ok=_UpperCAmelCase )
_a : List[Any] = os.path.join(self._cache_dir ,'fs_test' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(_UpperCAmelCase ,'a' )
return [probe_file]
if self._spark.conf.get('spark.master' ,'' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
_a : List[str] = (
self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(_UpperCAmelCase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def __lowercase ( self : List[Any] ):
return datasets.DatasetInfo(features=self.config.features )
def __lowercase ( self : Any ,_UpperCAmelCase : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : List[str] ):
import pyspark
def get_arrow_batch_size(_UpperCAmelCase : Dict ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
_a : int = self.df.count()
_a : Dict = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
_a : Optional[Any] = (
self.df.limit(_UpperCAmelCase )
.repartition(1 )
.mapInArrow(_UpperCAmelCase ,'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
_a : Optional[int] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
_a : List[Any] = min(_UpperCAmelCase ,int(approx_total_size / max_shard_size ) )
_a : Union[str, Any] = self.df.repartition(_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : str ,_UpperCAmelCase : int ,):
import pyspark
_a : int = ParquetWriter if file_format == 'parquet' else ArrowWriter
_a : Any = os.path.join(self._working_dir ,os.path.basename(_UpperCAmelCase ) ) if self._working_dir else fpath
_a : Optional[int] = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
_a : List[Any] = self.config.features
_a : Optional[Any] = self._writer_batch_size
_a : str = self._fs.storage_options
def write_arrow(_UpperCAmelCase : str ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
_a : Optional[int] = pyspark.TaskContext().taskAttemptId()
_a : Optional[Any] = next(_UpperCAmelCase ,_UpperCAmelCase )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] ,names=['task_id', 'num_examples', 'num_bytes'] ,)
_a : Union[str, Any] = 0
_a : Any = writer_class(
features=_UpperCAmelCase ,path=working_fpath.replace('SSSSS' ,F"""{shard_id:05d}""" ).replace('TTTTT' ,F"""{task_id:05d}""" ) ,writer_batch_size=_UpperCAmelCase ,storage_options=_UpperCAmelCase ,embed_local_files=_UpperCAmelCase ,)
_a : Any = pa.Table.from_batches([first_batch] )
writer.write_table(_UpperCAmelCase )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_a : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=['task_id', 'num_examples', 'num_bytes'] ,)
shard_id += 1
_a : List[str] = writer_class(
features=writer._features ,path=working_fpath.replace('SSSSS' ,F"""{shard_id:05d}""" ).replace('TTTTT' ,F"""{task_id:05d}""" ) ,writer_batch_size=_UpperCAmelCase ,storage_options=_UpperCAmelCase ,embed_local_files=_UpperCAmelCase ,)
_a : Optional[Any] = pa.Table.from_batches([batch] )
writer.write_table(_UpperCAmelCase )
if writer._num_bytes > 0:
_a : List[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=['task_id', 'num_examples', 'num_bytes'] ,)
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(_UpperCAmelCase ) ):
_a : Tuple = os.path.join(os.path.dirname(_UpperCAmelCase ) ,os.path.basename(_UpperCAmelCase ) )
shutil.move(_UpperCAmelCase ,_UpperCAmelCase )
_a : List[Any] = (
self.df.mapInArrow(_UpperCAmelCase ,'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) ,pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) ,pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) ,pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) ,)
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __lowercase ( self : int ,_UpperCAmelCase : "datasets.SplitGenerator" ,_UpperCAmelCase : str = "arrow" ,_UpperCAmelCase : Optional[Union[str, int]] = None ,_UpperCAmelCase : Optional[int] = None ,**_UpperCAmelCase : int ,):
self._validate_cache_dir()
_a : List[Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(_UpperCAmelCase )
_a : int = not is_remote_filesystem(self._fs )
_a : List[Any] = os.path.join if is_local else posixpath.join
_a : List[str] = '-TTTTT-SSSSS-of-NNNNN'
_a : Optional[int] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
_a : Union[str, Any] = path_join(self._output_dir ,_UpperCAmelCase )
_a : Dict = 0
_a : List[Any] = 0
_a : Tuple = 0
_a : Dict = []
_a : List[Any] = []
for task_id, content in self._prepare_split_single(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ):
(
_a
) : List[Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(_UpperCAmelCase )
_a : Any = total_num_examples
_a : Optional[int] = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
_a : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_a : Union[str, Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,):
rename(
_UpperCAmelCase ,fpath.replace('SSSSS' ,F"""{shard_id:05d}""" ).replace('TTTTT' ,F"""{task_id:05d}""" ) ,fpath.replace('TTTTT-SSSSS' ,F"""{global_shard_id:05d}""" ).replace('NNNNN' ,F"""{total_shards:05d}""" ) ,)
_a : Optional[Any] = []
_a : Optional[Any] = 0
for i in range(len(_UpperCAmelCase ) ):
_a : Optional[int] = task_id_and_num_shards[i]
for shard_id in range(_UpperCAmelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(_UpperCAmelCase ,len(_UpperCAmelCase ) ).map(lambda _UpperCAmelCase : _rename_shard(*_UpperCAmelCase ) ).collect()
else:
# don't use any pattern
_a : str = 0
_a : Union[str, Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' ,F"""{shard_id:05d}""" ).replace('TTTTT' ,F"""{task_id:05d}""" ) ,fpath.replace(_UpperCAmelCase ,'' ) ,)
def __lowercase ( self : List[str] ,_UpperCAmelCase : "datasets.SplitGenerator" ,):
return SparkExamplesIterable(self.df )
| 352 |
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : int ,_UpperCAmelCase : int ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ):
self.assertEqual(len(_UpperCAmelCase ) ,len(_UpperCAmelCase ) )
for a, b in zip(_UpperCAmelCase ,_UpperCAmelCase ):
self.assertAlmostEqual(_UpperCAmelCase ,_UpperCAmelCase ,delta=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : int = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_UpperCAmelCase ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step ,3 )
self.assertEqual(len(accumulator.gradients ) ,1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() ,[-2.0, 5.0] ,tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step ,0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() ,[0.0, 0.0] ,tol=1E-2 )
def __lowercase ( self : Any ):
_a : int = None
ops.enable_eager_execution_internal()
_a : Optional[int] = tf.config.list_physical_devices('CPU' )
if len(_UpperCAmelCase ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] ,[tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
_a : Tuple = tf.config.list_logical_devices(device_type='CPU' )
_a : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
_a : Tuple = GradientAccumulator()
_a : List[Any] = tf.Variable([4.0, 3.0] )
_a , _a : Dict = create_optimizer(5E-5 ,10 ,5 )
_a : Tuple = tf.Variable([0.0, 0.0] ,trainable=_UpperCAmelCase )
def accumulate_on_replica(_UpperCAmelCase : str ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients ,[variable] ) ) )
@tf.function
def accumulate(_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int] ):
with strategy.scope():
_a : Union[str, Any] = strategy.experimental_local_results(_UpperCAmelCase )
local_variables[0].assign(_UpperCAmelCase )
local_variables[1].assign(_UpperCAmelCase )
strategy.run(_UpperCAmelCase ,args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_UpperCAmelCase )
def _check_local_values(_UpperCAmelCase : Tuple ,_UpperCAmelCase : List[str] ):
_a : List[Any] = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() ,_UpperCAmelCase ,tol=1E-2 )
self.assertListAlmostEqual(values[1].value() ,_UpperCAmelCase ,tol=1E-2 )
accumulate([1.0, 2.0] ,[-1.0, 1.0] )
accumulate([3.0, -1.0] ,[-1.0, -1.0] )
accumulate([-2.0, 2.0] ,[3.0, -2.0] )
self.assertEqual(accumulator.step ,3 )
_check_local_values([2.0, 3.0] ,[1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() ,[4.0, 3.0] ,tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step ,0 )
_check_local_values([0.0, 0.0] ,[0.0, 0.0] )
| 107 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : str = original_name.split(""".""" )[0]
_A : List[str] = key.split(""".""" )
_A : Tuple = int(key_list[key_list.index(snake_case_ ) - 2] )
_A : Dict = int(key_list[key_list.index(snake_case_ ) - 1] )
_A : Any = orig_block_num - offset
_A : Dict = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''',f'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def lowerCAmelCase_ ( snake_case_ ):
_A : Dict = OrderedDict()
_A : int = 0, 0
for key, value in state_dict.items():
if key.startswith("""network""" ):
_A : str = key.replace("""network""","""poolformer.encoder""" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("""bias""" ) and "patch_embed" not in key:
patch_emb_offset += 1
_A : Dict = key[: key.find("""proj""" )]
_A : int = key.replace(snake_case_,f'''patch_embeddings.{total_embed_found}.''' )
_A : Dict = key.replace("""proj""","""projection""" )
if key.endswith("""bias""" ):
total_embed_found += 1
if "patch_embeddings" in key:
_A : Dict = 'poolformer.encoder.' + key
if "mlp.fc1" in key:
_A : Dict = replace_key_with_offset(snake_case_,snake_case_,"""mlp.fc1""","""output.conv1""" )
if "mlp.fc2" in key:
_A : List[Any] = replace_key_with_offset(snake_case_,snake_case_,"""mlp.fc2""","""output.conv2""" )
if "norm1" in key:
_A : List[str] = replace_key_with_offset(snake_case_,snake_case_,"""norm1""","""before_norm""" )
if "norm2" in key:
_A : Dict = replace_key_with_offset(snake_case_,snake_case_,"""norm2""","""after_norm""" )
if "layer_scale_1" in key:
_A : Optional[Any] = replace_key_with_offset(snake_case_,snake_case_,"""layer_scale_1""","""layer_scale_1""" )
if "layer_scale_2" in key:
_A : Tuple = replace_key_with_offset(snake_case_,snake_case_,"""layer_scale_2""","""layer_scale_2""" )
if "head" in key:
_A : Union[str, Any] = key.replace("""head""","""classifier""" )
_A : int = value
return new_state_dict
def lowerCAmelCase_ ( ):
_A : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_A : List[str] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw )
return image
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Any = PoolFormerConfig()
# set attributes based on model_name
_A : str = 'huggingface/label-files'
_A : List[Any] = model_name[-3:]
_A : Optional[int] = 1000
_A : Tuple = 'imagenet-1k-id2label.json'
_A : Union[str, Any] = (1, 1000)
# set config attributes
_A : Optional[int] = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) )
_A : Any = {int(snake_case_ ): v for k, v in idalabel.items()}
_A : int = idalabel
_A : List[str] = {v: k for k, v in idalabel.items()}
if size == "s12":
_A : str = [2, 2, 6, 2]
_A : Optional[int] = [64, 128, 320, 512]
_A : Union[str, Any] = 4.0
_A : str = 0.9
elif size == "s24":
_A : List[str] = [4, 4, 12, 4]
_A : Any = [64, 128, 320, 512]
_A : List[str] = 4.0
_A : Any = 0.9
elif size == "s36":
_A : int = [6, 6, 18, 6]
_A : int = [64, 128, 320, 512]
_A : Dict = 4.0
_A : Dict = 1e-6
_A : Any = 0.9
elif size == "m36":
_A : int = [6, 6, 18, 6]
_A : Dict = [96, 192, 384, 768]
_A : Tuple = 4.0
_A : Union[str, Any] = 1e-6
_A : str = 0.95
elif size == "m48":
_A : Union[str, Any] = [8, 8, 24, 8]
_A : Optional[int] = [96, 192, 384, 768]
_A : Optional[Any] = 4.0
_A : int = 1e-6
_A : Optional[Any] = 0.95
else:
raise ValueError(f'''Size {size} not supported''' )
# load image processor
_A : List[Any] = PoolFormerImageProcessor(crop_pct=snake_case_ )
# Prepare image
_A : Optional[Any] = prepare_img()
_A : Optional[Any] = image_processor(images=snake_case_,return_tensors="""pt""" ).pixel_values
logger.info(f'''Converting model {model_name}...''' )
# load original state dict
_A : Optional[Any] = torch.load(snake_case_,map_location=torch.device("""cpu""" ) )
# rename keys
_A : Union[str, Any] = rename_keys(snake_case_ )
# create HuggingFace model and load state dict
_A : str = PoolFormerForImageClassification(snake_case_ )
model.load_state_dict(snake_case_ )
model.eval()
# Define image processor
_A : Union[str, Any] = PoolFormerImageProcessor(crop_pct=snake_case_ )
_A : Any = image_processor(images=prepare_img(),return_tensors="""pt""" ).pixel_values
# forward pass
_A : List[Any] = model(snake_case_ )
_A : List[Any] = outputs.logits
# define expected logit slices for different models
if size == "s12":
_A : List[Any] = torch.tensor([-0.30_45, -0.67_58, -0.48_69] )
elif size == "s24":
_A : Optional[int] = torch.tensor([0.44_02, -0.13_74, -0.80_45] )
elif size == "s36":
_A : Optional[Any] = torch.tensor([-0.60_80, -0.51_33, -0.58_98] )
elif size == "m36":
_A : Any = torch.tensor([0.39_52, 0.22_63, -1.26_68] )
elif size == "m48":
_A : Optional[int] = torch.tensor([0.11_67, -0.06_56, -0.34_23] )
else:
raise ValueError(f'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3],snake_case_,atol=1e-2 )
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="poolformer_s12",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
_snake_case = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 26 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
a_ :List[Any] = logging.get_logger(__name__)
a_ :List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
a_ :List[Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def lowercase_ (A : Dict ):
snake_case__ : Optional[Any] = {}
with open(A , 'r' ) as file:
for line_number, line in enumerate(A ):
snake_case__ : Dict = line.strip()
if line:
snake_case__ : int = line.split()
snake_case__ : List[str] = line_number
snake_case__ : Dict = words[0]
snake_case__ : Optional[Any] = value
return result
def lowercase_ (A : int , A : int , A : Optional[int] , A : Optional[Any] , A : Tuple ):
for attribute in key.split('.' ):
snake_case__ : Optional[int] = getattr(A , A )
snake_case__ : Union[str, Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(A ):
snake_case__ : List[str] = PARAM_MAPPING[full_name.split('.' )[-1]]
snake_case__ : Dict = 'param'
if weight_type is not None and weight_type != "param":
snake_case__ : Union[str, Any] = getattr(A , A ).shape
elif weight_type is not None and weight_type == "param":
snake_case__ : Optional[int] = hf_pointer
for attribute in hf_param_name.split('.' ):
snake_case__ : Optional[Any] = getattr(A , A )
snake_case__ : Dict = shape_pointer.shape
# let's reduce dimension
snake_case__ : List[Any] = value[0]
else:
snake_case__ : Union[str, Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case__ : Any = value
elif weight_type == "weight_g":
snake_case__ : List[Any] = value
elif weight_type == "weight_v":
snake_case__ : Any = value
elif weight_type == "bias":
snake_case__ : List[Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
snake_case__ : int = getattr(A , A )
snake_case__ : Optional[int] = value
else:
snake_case__ : Optional[Any] = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def lowercase_ (A : Tuple , A : List[Any] , A : int , A : str , A : Tuple ):
snake_case__ : Optional[int] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(A ):
snake_case__ : List[str] = PARAM_MAPPING[full_name.split('.' )[-1]]
snake_case__ : str = 'param'
if weight_type is not None and weight_type != "param":
snake_case__ : int = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
snake_case__ : Any = '.'.join([key, hf_param_name] )
else:
snake_case__ : Dict = key
snake_case__ : List[str] = value if 'lm_head' in full_key else value[0]
a_ :List[str] = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def lowercase_ (A : str , A : Optional[Any] , A : Optional[Any]=None , A : List[str]=None ):
snake_case__ : Optional[int] = False
for key, mapped_key in MAPPING.items():
snake_case__ : Tuple = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
snake_case__ : Optional[int] = True
if "*" in mapped_key:
snake_case__ : List[Any] = name.split(A )[0].split('.' )[-2]
snake_case__ : Union[str, Any] = mapped_key.replace('*' , A )
if "weight_g" in name:
snake_case__ : Tuple = 'weight_g'
elif "weight_v" in name:
snake_case__ : List[str] = 'weight_v'
elif "bias" in name:
snake_case__ : Dict = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ : Optional[int] = 'weight'
else:
snake_case__ : str = None
if hf_dict is not None:
rename_dict(A , A , A , A , A )
else:
set_recursively(A , A , A , A , A )
return is_used
return is_used
def lowercase_ (A : Optional[Any] , A : Dict , A : Optional[int] ):
snake_case__ : Dict = []
snake_case__ : Tuple = fairseq_model.state_dict()
snake_case__ : str = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ : str = False
if "conv_layers" in name:
load_conv_layer(
A , A , A , A , hf_model.config.feat_extract_norm == 'group' , )
snake_case__ : Any = True
else:
snake_case__ : Dict = load_wavaveca_layer(A , A , A )
if not is_used:
unused_weights.append(A )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowercase_ (A : Dict , A : Optional[Any] , A : Tuple , A : str , A : List[str] ):
snake_case__ : List[Any] = full_name.split('conv_layers.' )[-1]
snake_case__ : List[str] = name.split('.' )
snake_case__ : List[Any] = int(items[0] )
snake_case__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
snake_case__ : str = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
snake_case__ : str = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
snake_case__ : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(A )
@torch.no_grad()
def lowercase_ (A : Union[str, Any] , A : str , A : Tuple=None , A : List[str]=None , A : Any=True , A : Optional[int]=False ):
if config_path is not None:
snake_case__ : List[Any] = WavaVecaConfig.from_pretrained(A )
else:
snake_case__ : List[Any] = WavaVecaConfig()
if is_seq_class:
snake_case__ : Dict = read_txt_into_dict(A )
snake_case__ : Any = idalabel
snake_case__ : Union[str, Any] = WavaVecaForSequenceClassification(A )
snake_case__ : Any = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=A , return_attention_mask=A , )
feature_extractor.save_pretrained(A )
elif is_finetuned:
if dict_path:
snake_case__ : str = Dictionary.load(A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case__ : List[str] = target_dict.pad_index
snake_case__ : Optional[int] = target_dict.bos_index
snake_case__ : Optional[int] = target_dict.eos_index
snake_case__ : List[Any] = len(target_dict.symbols )
snake_case__ : str = os.path.join(A , 'vocab.json' )
if not os.path.isdir(A ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(A ) )
return
os.makedirs(A , exist_ok=A )
snake_case__ : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
snake_case__ : Optional[Any] = 0
snake_case__ : Union[str, Any] = 1
with open(A , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(A , A )
snake_case__ : List[Any] = WavaVecaCTCTokenizer(
A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=A , )
snake_case__ : str = True if config.feat_extract_norm == 'layer' else False
snake_case__ : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=A , return_attention_mask=A , )
snake_case__ : Union[str, Any] = WavaVecaProcessor(feature_extractor=A , tokenizer=A )
processor.save_pretrained(A )
snake_case__ : str = WavaVecaForCTC(A )
else:
snake_case__ : int = WavaVecaForPreTraining(A )
if is_finetuned or is_seq_class:
snake_case__ , snake_case__ , snake_case__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
snake_case__ : Tuple = argparse.Namespace(task='audio_pretraining' )
snake_case__ : str = fairseq.tasks.setup_task(A )
snake_case__ , snake_case__ , snake_case__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A )
snake_case__ : List[Any] = model[0].eval()
recursively_load_weights(A , A , not is_finetuned )
hf_wavavec.save_pretrained(A )
if __name__ == "__main__":
a_ :List[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
a_ :str = parser.parse_args()
a_ :Tuple = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 277 | 0 |
def _UpperCamelCase ( snake_case__, snake_case__ ) -> List[Any]:
__UpperCAmelCase : List[str] = ""
for i in table:
res += inp[i - 1]
return res
def _UpperCamelCase ( snake_case__ ) -> Union[str, Any]:
return data[1:] + data[0]
def _UpperCamelCase ( snake_case__, snake_case__ ) -> List[str]:
__UpperCAmelCase : List[Any] = ""
for i in range(len(snake_case__ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def _UpperCamelCase ( snake_case__, snake_case__ ) -> str:
__UpperCAmelCase : int = int("0b" + data[0] + data[-1], 2 )
__UpperCAmelCase : str = int("0b" + data[1:3], 2 )
return bin(s[row][col] )[2:]
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ ) -> int:
__UpperCAmelCase : int = message[:4]
__UpperCAmelCase : Dict = message[4:]
__UpperCAmelCase : Tuple = apply_table(snake_case__, snake_case__ )
__UpperCAmelCase : Dict = xor(snake_case__, snake_case__ )
__UpperCAmelCase : Dict = apply_sbox(snake_case__, temp[:4] ) # noqa: E741
__UpperCAmelCase : List[str] = apply_sbox(snake_case__, temp[4:] )
__UpperCAmelCase : List[str] = "0" * (2 - len(snake_case__ )) + l # noqa: E741
__UpperCAmelCase : Optional[int] = "0" * (2 - len(snake_case__ )) + r
__UpperCAmelCase : Dict = apply_table(l + r, snake_case__ )
__UpperCAmelCase : List[Any] = xor(snake_case__, snake_case__ )
return temp + right
if __name__ == "__main__":
_snake_case = input('''Enter 10 bit key: ''')
_snake_case = input('''Enter 8 bit message: ''')
_snake_case = [6, 3, 7, 4, 8, 5, 10, 9]
_snake_case = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_snake_case = [2, 4, 3, 1]
_snake_case = [2, 6, 3, 1, 4, 8, 5, 7]
_snake_case = [4, 1, 3, 5, 7, 2, 8, 6]
_snake_case = [4, 1, 2, 3, 2, 3, 4, 1]
_snake_case = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_snake_case = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_snake_case = apply_table(key, paa_table)
_snake_case = temp[:5]
_snake_case = temp[5:]
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
# encryption
_snake_case = apply_table(message, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
_snake_case = apply_table(CT, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print('''Plain text after decypting is:''', PT)
| 358 | import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _snake_case ( unittest.TestCase ):
def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: List[str]=32 , __lowerCamelCase: Optional[Any]=5 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: str=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Tuple=5_12 , __lowerCamelCase: int=16 , __lowerCamelCase: str=2 , __lowerCamelCase: Optional[Any]=0.02 , __lowerCamelCase: Optional[Any]=4 , ) -> str:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : Optional[int] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[str] = use_attention_mask
__UpperCAmelCase : Dict = use_token_type_ids
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Dict = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : Optional[int] = type_vocab_size
__UpperCAmelCase : str = type_sequence_label_size
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : str = num_choices
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : str = None
if self.use_attention_mask:
__UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Any = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__lowerCamelCase , )
return config, input_ids, attention_mask
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: str = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self: List[Any] ) -> Dict:
__UpperCAmelCase : Union[str, Any] = FlaxDistilBertModelTester(self )
@slow
def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase : Optional[int] = model_class_name.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowerCamelCase )
@require_flax
class _snake_case ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self: int ) -> List[Any]:
__UpperCAmelCase : Dict = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Any = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
__UpperCAmelCase : str = (1, 11, 7_68)
self.assertEqual(output.shape , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) )
| 342 | 0 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class __SCREAMING_SNAKE_CASE :
def __init__( self : int , A : Collection[float] | None = None ) ->None:
if components is None:
lowerCamelCase__ : Optional[int] = []
lowerCamelCase__ : Dict = list(A )
def __len__( self : Optional[int] ) ->int:
return len(self.__components )
def __str__( self : Optional[Any] ) ->str:
return "(" + ",".join(map(A , self.__components ) ) + ")"
def __add__( self : Any , A : Vector ) ->Vector:
lowerCamelCase__ : List[Any] = len(self )
if size == len(A ):
lowerCamelCase__ : List[str] = [self.__components[i] + other.component(A ) for i in range(A )]
return Vector(A )
else:
raise Exception('''must have the same size''' )
def __sub__( self : Tuple , A : Vector ) ->Vector:
lowerCamelCase__ : Optional[Any] = len(self )
if size == len(A ):
lowerCamelCase__ : Optional[Any] = [self.__components[i] - other.component(A ) for i in range(A )]
return Vector(A )
else: # error case
raise Exception('''must have the same size''' )
@overload
def __mul__( self : Any , A : float ) ->Vector:
...
@overload
def __mul__( self : Optional[int] , A : Vector ) ->float:
...
def __mul__( self : Any , A : float | Vector ) ->float | Vector:
if isinstance(A , (float, int) ):
lowerCamelCase__ : Any = [c * other for c in self.__components]
return Vector(A )
elif isinstance(A , A ) and len(self ) == len(A ):
lowerCamelCase__ : Optional[int] = len(self )
lowerCamelCase__ : Dict = [self.__components[i] * other.component(A ) for i in range(A )]
return sum(A )
else: # error case
raise Exception('''invalid operand!''' )
def __lowerCamelCase ( self : List[str] ) ->Vector:
return Vector(self.__components )
def __lowerCamelCase ( self : Any , A : int ) ->float:
if isinstance(A , A ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('''index out of range''' )
def __lowerCamelCase ( self : Optional[Any] , A : int , A : float ) ->None:
assert -len(self.__components ) <= pos < len(self.__components )
lowerCamelCase__ : List[str] = value
def __lowerCamelCase ( self : Dict ) ->float:
if len(self.__components ) == 0:
raise Exception('''Vector is empty''' )
lowerCamelCase__ : int = [c**2 for c in self.__components]
return math.sqrt(sum(A ) )
def __lowerCamelCase ( self : List[str] , A : Vector , A : bool = False ) ->float:
lowerCamelCase__ : str = self * other
lowerCamelCase__ : List[Any] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _a ( UpperCAmelCase ) -> Vector:
"""simple docstring"""
assert isinstance(UpperCAmelCase , UpperCAmelCase )
return Vector([0] * dimension )
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Vector:
"""simple docstring"""
assert isinstance(UpperCAmelCase , UpperCAmelCase ) and (isinstance(UpperCAmelCase , UpperCAmelCase ))
lowerCamelCase__ : Tuple = [0] * dimension
lowerCamelCase__ : int = 1
return Vector(UpperCAmelCase )
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Vector:
"""simple docstring"""
assert (
isinstance(UpperCAmelCase , UpperCAmelCase )
and isinstance(UpperCAmelCase , UpperCAmelCase )
and (isinstance(UpperCAmelCase , (int, float) ))
)
return x * scalar + y
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Vector:
"""simple docstring"""
random.seed(UpperCAmelCase )
lowerCamelCase__ : Tuple = [random.randint(UpperCAmelCase , UpperCAmelCase ) for _ in range(UpperCAmelCase )]
return Vector(UpperCAmelCase )
class __SCREAMING_SNAKE_CASE :
def __init__( self : Dict , A : list[list[float]] , A : int , A : int ) ->None:
lowerCamelCase__ : List[Any] = matrix
lowerCamelCase__ : str = w
lowerCamelCase__ : Any = h
def __str__( self : Dict ) ->str:
lowerCamelCase__ : List[Any] = ''''''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : str , A : Matrix ) ->Matrix:
if self.__width == other.width() and self.__height == other.height():
lowerCamelCase__ : Any = []
for i in range(self.__height ):
lowerCamelCase__ : Union[str, Any] = [
self.__matrix[i][j] + other.component(A , A )
for j in range(self.__width )
]
matrix.append(A )
return Matrix(A , self.__width , self.__height )
else:
raise Exception('''matrix must have the same dimension!''' )
def __sub__( self : List[str] , A : Matrix ) ->Matrix:
if self.__width == other.width() and self.__height == other.height():
lowerCamelCase__ : List[str] = []
for i in range(self.__height ):
lowerCamelCase__ : Any = [
self.__matrix[i][j] - other.component(A , A )
for j in range(self.__width )
]
matrix.append(A )
return Matrix(A , self.__width , self.__height )
else:
raise Exception('''matrices must have the same dimension!''' )
@overload
def __mul__( self : Optional[int] , A : float ) ->Matrix:
...
@overload
def __mul__( self : List[str] , A : Vector ) ->Vector:
...
def __mul__( self : Dict , A : float | Vector ) ->Vector | Matrix:
if isinstance(A , A ): # matrix-vector
if len(A ) == self.__width:
lowerCamelCase__ : Dict = zero_vector(self.__height )
for i in range(self.__height ):
lowerCamelCase__ : Tuple = [
self.__matrix[i][j] * other.component(A )
for j in range(self.__width )
]
ans.change_component(A , sum(A ) )
return ans
else:
raise Exception(
'''vector must have the same size as the '''
'''number of columns of the matrix!''' )
elif isinstance(A , (int, float) ): # matrix-scalar
lowerCamelCase__ : Optional[Any] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A , self.__width , self.__height )
return None
def __lowerCamelCase ( self : int ) ->int:
return self.__height
def __lowerCamelCase ( self : Union[str, Any] ) ->int:
return self.__width
def __lowerCamelCase ( self : Dict , A : int , A : int ) ->float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('''change_component: indices out of bounds''' )
def __lowerCamelCase ( self : Any , A : int , A : int , A : float ) ->None:
if 0 <= x < self.__height and 0 <= y < self.__width:
lowerCamelCase__ : Union[str, Any] = value
else:
raise Exception('''change_component: indices out of bounds''' )
def __lowerCamelCase ( self : int , A : int , A : int ) ->float:
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
lowerCamelCase__ : Dict = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A ) ):
lowerCamelCase__ : List[str] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(A , self.__width - 1 , self.__height - 1 ).determinant()
def __lowerCamelCase ( self : Union[str, Any] , A : int , A : int ) ->float:
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A , A )
else:
raise Exception('''Indices out of bounds''' )
def __lowerCamelCase ( self : Tuple ) ->float:
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
if self.__height < 1:
raise Exception('''Matrix has no element''' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
lowerCamelCase__ : List[Any] = [
self.__matrix[0][y] * self.cofactor(0 , A ) for y in range(self.__width )
]
return sum(A )
def _a ( UpperCAmelCase ) -> Matrix:
"""simple docstring"""
lowerCamelCase__ : list[list[float]] = [[0] * n for _ in range(UpperCAmelCase )]
return Matrix(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Matrix:
"""simple docstring"""
random.seed(UpperCAmelCase )
lowerCamelCase__ : list[list[float]] = [
[random.randint(UpperCAmelCase , UpperCAmelCase ) for _ in range(UpperCAmelCase )] for _ in range(UpperCAmelCase )
]
return Matrix(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
| 142 |
from typing import Any
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , A : Any ) ->Optional[int]:
lowerCamelCase__ : Optional[int] = data
lowerCamelCase__ : Any = None
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] ) ->str:
lowerCamelCase__ : Any = None
def __lowerCamelCase ( self : Tuple ) ->Any:
lowerCamelCase__ : str = self.head
while temp is not None:
print(temp.data , end=''' ''' )
lowerCamelCase__ : Dict = temp.next
print()
def __lowerCamelCase ( self : Dict , A : Any ) ->Optional[int]:
lowerCamelCase__ : Union[str, Any] = Node(A )
lowerCamelCase__ : Dict = self.head
lowerCamelCase__ : List[str] = new_node
def __lowerCamelCase ( self : Optional[int] , A : int , A : Tuple ) ->List[Any]:
if node_data_a == node_data_a:
return
else:
lowerCamelCase__ : Tuple = self.head
while node_a is not None and node_a.data != node_data_a:
lowerCamelCase__ : Union[str, Any] = node_a.next
lowerCamelCase__ : int = self.head
while node_a is not None and node_a.data != node_data_a:
lowerCamelCase__ : Optional[int] = node_a.next
if node_a is None or node_a is None:
return
lowerCamelCase__ , lowerCamelCase__ : str = node_a.data, node_a.data
if __name__ == "__main__":
_A : List[Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
| 142 | 1 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
SCREAMING_SNAKE_CASE_:Union[str, Any] = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 354 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE_:Any = {
"""configuration_mobilenet_v2""": [
"""MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileNetV2Config""",
"""MobileNetV2OnnxConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:List[Any] = ["""MobileNetV2FeatureExtractor"""]
SCREAMING_SNAKE_CASE_:Tuple = ["""MobileNetV2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Any = [
"""MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileNetV2ForImageClassification""",
"""MobileNetV2ForSemanticSegmentation""",
"""MobileNetV2Model""",
"""MobileNetV2PreTrainedModel""",
"""load_tf_weights_in_mobilenet_v2""",
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
SCREAMING_SNAKE_CASE_:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 115 | 0 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
UpperCAmelCase_ = {'UserAgent': UserAgent().random}
def lowerCAmelCase_ ( __UpperCAmelCase: str ) -> dict:
UpperCamelCase__ : Tuple = script.contents[0]
UpperCamelCase__ : Any = json.loads(data[data.find('''{"config"''' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class lowercase__ :
'''simple docstring'''
def __init__( self, __magic_name__ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Dict = f"https://www.instagram.com/{username}/"
UpperCamelCase__ : Optional[int] = self.get_json()
def UpperCamelCase__ ( self ) -> dict:
"""simple docstring"""
UpperCamelCase__ : List[Any] = requests.get(self.url, headers=_A ).text
UpperCamelCase__ : Tuple = BeautifulSoup(_A, '''html.parser''' ).find_all('''script''' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ) -> str:
"""simple docstring"""
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self ) -> str:
"""simple docstring"""
return f"{self.fullname} ({self.username}) is {self.biography}"
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return self.user_data["username"]
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return self.user_data["full_name"]
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return self.user_data["biography"]
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return self.user_data["business_email"]
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return self.user_data["external_url"]
@property
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
return self.user_data["edge_followed_by"]["count"]
@property
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
return self.user_data["edge_follow"]["count"]
@property
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return self.user_data["profile_pic_url_hd"]
@property
def UpperCamelCase__ ( self ) -> bool:
"""simple docstring"""
return self.user_data["is_verified"]
@property
def UpperCamelCase__ ( self ) -> bool:
"""simple docstring"""
return self.user_data["is_private"]
def lowerCAmelCase_ ( __UpperCAmelCase: str = "github" ) -> None:
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
UpperCamelCase__ : List[str] = InstagramUser(__UpperCAmelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __UpperCAmelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 12_0000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = InstagramUser('github')
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 201 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class snake_case__ ( unittest.TestCase):
def __init__( self : int , _A : List[str] , _A : Dict=7 , _A : List[str]=3 , _A : List[str]=18 , _A : Dict=30 , _A : Union[str, Any]=4_00 , _A : List[str]=True , _A : List[str]=None , _A : int=True , _A : Tuple=None , _A : Union[str, Any]=True , _A : Tuple=[0.5, 0.5, 0.5] , _A : Union[str, Any]=[0.5, 0.5, 0.5] , _A : Tuple=False , ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = size if size is not None else {'''height''': 20, '''width''': 20}
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : Optional[Any] = image_size
UpperCAmelCase_ : Tuple = min_resolution
UpperCAmelCase_ : Tuple = max_resolution
UpperCAmelCase_ : Optional[int] = do_resize
UpperCAmelCase_ : Tuple = size
UpperCAmelCase_ : Optional[Any] = do_center_crop
UpperCAmelCase_ : Optional[int] = crop_size
UpperCAmelCase_ : Tuple = do_normalize
UpperCAmelCase_ : Optional[Any] = image_mean
UpperCAmelCase_ : int = image_std
UpperCAmelCase_ : List[Any] = do_reduce_labels
def A ( self : Union[str, Any] ) -> str:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
UpperCAmelCase_ : Optional[Any] = Image.open(dataset[0]['''file'''] )
UpperCAmelCase_ : str = Image.open(dataset[1]['''file'''] )
return image, map
def __UpperCAmelCase ( ) -> Any:
UpperCAmelCase_ : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
UpperCAmelCase_ : int = Image.open(ds[0]['''file'''] )
UpperCAmelCase_ : Optional[Any] = Image.open(ds[1]['''file'''] )
UpperCAmelCase_ : Dict = Image.open(ds[2]['''file'''] )
UpperCAmelCase_ : List[str] = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = BeitImageProcessor if is_vision_available() else None
def A ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = BeitImageProcessingTester(self )
@property
def A ( self : List[Any] ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''do_center_crop''' ) )
self.assertTrue(hasattr(_A , '''center_crop''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
def A ( self : List[str] ) -> Optional[int]:
UpperCAmelCase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , _A )
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , _A )
def A ( self : Optional[Any] ) -> Any:
pass
def A ( self : List[str] ) -> Optional[int]:
# Initialize image_processing
UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Union[str, Any] ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Optional[int] ) -> str:
# Initialize image_processing
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Any ) -> Optional[Any]:
# Initialize image_processing
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
UpperCAmelCase_ : Union[str, Any] = []
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
UpperCAmelCase_ : str = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test not batched input (PIL images)
UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs()
UpperCAmelCase_ : List[str] = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched input (PIL images)
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = prepare_semantic_batch_inputs()
UpperCAmelCase_ : int = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
def A ( self : List[Any] ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs()
UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 1_50 )
UpperCAmelCase_ : int = True
UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
| 304 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''microsoft/speecht5_tts'''
snake_case_ = (
'''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '''
'''text to read (in English) and returns a waveform object containing the sound.'''
)
snake_case_ = '''text_reader'''
snake_case_ = SpeechTaProcessor
snake_case_ = SpeechTaForTextToSpeech
snake_case_ = SpeechTaHifiGan
snake_case_ = ['''text''']
snake_case_ = ['''audio''']
def _lowerCamelCase ( self ) -> List[Any]:
if self.post_processor is None:
snake_case = 'microsoft/speecht5_hifigan'
super().setup()
def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict:
snake_case = self.pre_processor(text=lowercase_, return_tensors='pt', truncation=lowercase_ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('Datasets needs to be installed if not passing speaker embeddings.' )
snake_case = load_dataset('Matthijs/cmu-arctic-xvectors', split='validation' )
snake_case = torch.tensor(embeddings_dataset[7305]['xvector'] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def _lowerCamelCase ( self, lowercase_ ) -> Any:
with torch.no_grad():
return self.model.generate_speech(**lowercase_ )
def _lowerCamelCase ( self, lowercase_ ) -> Tuple:
with torch.no_grad():
return self.post_processor(lowercase_ ).cpu().detach()
| 332 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=__lowerCAmelCase ):
snake_case_ = ['''note_seq''']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
requires_backends(self, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
requires_backends(cls, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
requires_backends(cls, ['note_seq'] )
| 332 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
snake_case_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
snake_case_ = 25_0004
snake_case_ = 25_0020
@require_sentencepiece
@require_tokenizers
class A_ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = MBartaaTokenizer
__UpperCamelCase = MBartaaTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def UpperCAmelCase__ ( self :Dict ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = MBartaaTokenizer(lowercase_ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[Any]:
UpperCAmelCase = '<s>'
UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def UpperCAmelCase__ ( self :Any ) -> Optional[Any]:
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(lowercase_ ) , 10_54 )
def UpperCAmelCase__ ( self :Optional[Any] ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def UpperCAmelCase__ ( self :Any ) -> Dict:
UpperCAmelCase = MBartaaTokenizer(lowercase_ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=lowercase_ )
UpperCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def UpperCAmelCase__ ( self :Optional[Any] ) -> Any:
# fmt: off
UpperCAmelCase = {'input_ids': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def UpperCAmelCase__ ( self :List[str] ) -> Dict:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
UpperCAmelCase = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(lowercase_ )
UpperCAmelCase = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
UpperCAmelCase = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(lowercase_ )
UpperCAmelCase = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
UpperCAmelCase = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(lowercase_ )
UpperCAmelCase = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
UpperCAmelCase = tokenizer_p.save_pretrained(lowercase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(lowercase_ )
UpperCAmelCase = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = """facebook/mbart-large-50-one-to-many-mmt"""
__UpperCamelCase = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
__UpperCamelCase = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
__UpperCamelCase = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def UpperCAmelCase__ ( cls :int ) -> Optional[int]:
UpperCAmelCase = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
UpperCAmelCase = 1
return cls
def UpperCAmelCase__ ( self :Optional[Any] ) -> Optional[Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_00_38 )
def UpperCAmelCase__ ( self :Dict ) -> Tuple:
UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowercase_ )
def UpperCAmelCase__ ( self :int ) -> List[Any]:
self.assertIn(lowercase_ , self.tokenizer.all_special_ids )
UpperCAmelCase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
UpperCAmelCase = self.tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ )
UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertNotIn(self.tokenizer.eos_token , lowercase_ )
def UpperCAmelCase__ ( self :Tuple ) -> str:
UpperCAmelCase = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , lowercase_ )
UpperCAmelCase = 10
UpperCAmelCase = self.tokenizer(lowercase_ , max_length=lowercase_ , truncation=lowercase_ ).input_ids[0]
self.assertEqual(ids[0] , lowercase_ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(lowercase_ ) , lowercase_ )
def UpperCAmelCase__ ( self :List[Any] ) -> List[Any]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_00_53, 25_00_01] )
def UpperCAmelCase__ ( self :List[str] ) -> Optional[Any]:
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase = MBartaaTokenizer.from_pretrained(lowercase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase_ )
@require_torch
def UpperCAmelCase__ ( self :Tuple ) -> Any:
UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowercase_ , return_tensors='pt' )
UpperCAmelCase = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCAmelCase__ ( self :Tuple ) -> Dict:
UpperCAmelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
UpperCAmelCase = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowercase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCAmelCase__ ( self :Tuple ) -> Any:
UpperCAmelCase = self.tokenizer(self.src_text , padding=lowercase_ , truncation=lowercase_ , max_length=3 , return_tensors='pt' )
UpperCAmelCase = self.tokenizer(
text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=10 , return_tensors='pt' )
UpperCAmelCase = targets['input_ids']
UpperCAmelCase = shift_tokens_right(lowercase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase__ ( self :Tuple ) -> Any:
UpperCAmelCase = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(lowercase_ ) , {
# en_XX, A, test, EOS
'input_ids': [[25_00_04, 62, 30_34, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_00_01,
} , )
| 78 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
_SCREAMING_SNAKE_CASE : Optional[Any] = TypeVar('''T''')
class a ( Generic[T] ):
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : T ) -> None:
lowerCamelCase_ = data
lowerCamelCase_ = self
lowerCamelCase_ = 0
class a ( Generic[T] ):
def __init__( self : Any ) -> None:
# map from node name to the node object
lowerCamelCase_ = {}
def UpperCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : T ) -> None:
# create a new set with x as its member
lowerCamelCase_ = DisjointSetTreeNode(__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : T ) -> DisjointSetTreeNode[T]:
# find the set x belongs to (with path-compression)
lowerCamelCase_ = self.map[data]
if elem_ref != elem_ref.parent:
lowerCamelCase_ = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def UpperCamelCase ( self : str , __SCREAMING_SNAKE_CASE : DisjointSetTreeNode[T] , __SCREAMING_SNAKE_CASE : DisjointSetTreeNode[T] ) -> None:
# helper function for union operation
if nodea.rank > nodea.rank:
lowerCamelCase_ = nodea
else:
lowerCamelCase_ = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def UpperCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T ) -> None:
# merge 2 disjoint sets
self.link(self.find_set(__SCREAMING_SNAKE_CASE ) , self.find_set(__SCREAMING_SNAKE_CASE ) )
class a ( Generic[T] ):
def __init__( self : Optional[int] ) -> None:
# connections: map from the node to the neighbouring nodes (with weights)
lowerCamelCase_ = {}
def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : T ) -> None:
# add a node ONLY if its not present in the graph
if node not in self.connections:
lowerCamelCase_ = {}
def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
# add an edge with the given weight
self.add_node(__SCREAMING_SNAKE_CASE )
self.add_node(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def UpperCamelCase ( self : List[Any] ) -> GraphUndirectedWeighted[T]:
lowerCamelCase_ = []
lowerCamelCase_ = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda __SCREAMING_SNAKE_CASE : x[2] )
# creating the disjoint set
lowerCamelCase_ = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(__SCREAMING_SNAKE_CASE )
# MST generation
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edges[index]
index += 1
lowerCamelCase_ = disjoint_set.find_set(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = disjoint_set.find_set(__SCREAMING_SNAKE_CASE )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
disjoint_set.union(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return graph
| 183 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__a = logging.get_logger(__name__)
__a = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = "imagegpt"
lowercase = ["past_key_values"]
lowercase = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[Any] , snake_case_ : List[str]=512 + 1 , snake_case_ : Dict=32 * 32 , snake_case_ : List[str]=512 , snake_case_ : Optional[Any]=24 , snake_case_ : Dict=8 , snake_case_ : Optional[Any]=None , snake_case_ : List[str]="quick_gelu" , snake_case_ : Dict=0.1 , snake_case_ : Optional[Any]=0.1 , snake_case_ : Optional[int]=0.1 , snake_case_ : Optional[int]=1E-5 , snake_case_ : int=0.02 , snake_case_ : Optional[int]=True , snake_case_ : Tuple=True , snake_case_ : Optional[Any]=False , snake_case_ : int=False , snake_case_ : str=False , **snake_case_ : Optional[Any] , ):
snake_case__ : Any = vocab_size
snake_case__ : List[Any] = n_positions
snake_case__ : Optional[Any] = n_embd
snake_case__ : List[Any] = n_layer
snake_case__ : str = n_head
snake_case__ : Tuple = n_inner
snake_case__ : Union[str, Any] = activation_function
snake_case__ : Optional[int] = resid_pdrop
snake_case__ : Tuple = embd_pdrop
snake_case__ : Optional[int] = attn_pdrop
snake_case__ : List[Any] = layer_norm_epsilon
snake_case__ : str = initializer_range
snake_case__ : Dict = scale_attn_weights
snake_case__ : List[str] = use_cache
snake_case__ : int = scale_attn_by_inverse_layer_idx
snake_case__ : Optional[int] = reorder_and_upcast_attn
snake_case__ : Optional[int] = tie_word_embeddings
super().__init__(tie_word_embeddings=snake_case_ , **snake_case_ )
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
@property
def lowerCamelCase ( self : List[str] ):
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
def lowerCamelCase ( self : Optional[Any] , snake_case_ : "FeatureExtractionMixin" , snake_case_ : int = 1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional["TensorType"] = None , snake_case_ : int = 3 , snake_case_ : int = 32 , snake_case_ : int = 32 , ):
snake_case__ : Tuple = self._generate_dummy_images(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
snake_case__ : List[str] = dict(preprocessor(images=snake_case_ , return_tensors=snake_case_ ) )
return inputs
| 43 |
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : int ):
snake_case__ : List[str] = """hf-internal-testing/tiny-random-t5"""
snake_case__ : Any = AutoTokenizer.from_pretrained(snake_case_ )
snake_case__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ )
snake_case__ : Union[str, Any] = tokenizer("""This is me""" , return_tensors="""pt""" )
snake_case__ : str = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
snake_case__ : Optional[int] = model.generate(**snake_case_ )
snake_case__ : Any = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
snake_case__ : int = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
snake_case__ : Optional[Any] = model_reloaded.generate(**snake_case_ )
self.assertTrue(torch.allclose(snake_case_ , snake_case_ ) )
def lowerCamelCase ( self : List[Any] ):
snake_case__ : Optional[Any] = """hf-internal-testing/tiny-random-t5"""
snake_case__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ )
snake_case__ : int = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(snake_case_ ):
model.save_pretrained(snake_case_ )
snake_case__ : int = model.reverse_bettertransformer()
model.save_pretrained(snake_case_ )
| 43 | 1 |
'''simple docstring'''
def snake_case_ (_a : int , _a : int ):
return "\n".join(
F"{number} * {i} = {number * i}" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 34 |
'''simple docstring'''
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
A =input('Enter image url: ').strip()
print(f"""Downloading image from {url} ...""")
A =BeautifulSoup(requests.get(url).content, 'html.parser')
# The image URL is in the content field of the first meta tag with property og:image
A =soup.find('meta', {'property': 'og:image'})['content']
A =requests.get(image_url).content
A =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
| 34 | 1 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def UpperCamelCase ( a ) -> Dict: # picklable for multiprocessing
'''simple docstring'''
return x.sum()
def UpperCamelCase ( a ) -> Optional[int]: # picklable for multiprocessing
'''simple docstring'''
return i + 1
@dataclass
class _SCREAMING_SNAKE_CASE :
__SCREAMING_SNAKE_CASE :int
__SCREAMING_SNAKE_CASE :str
class _SCREAMING_SNAKE_CASE ( __a ):
def snake_case__ ( self : List[Any] ):
__magic_name__ = {}
__magic_name__ = []
__magic_name__ = 1
__magic_name__ = [1, 2]
__magic_name__ = {'''a''': 1, '''b''': 2}
__magic_name__ = {'''a''': [1, 2], '''b''': [3, 4]}
__magic_name__ = {'''a''': {'''1''': 1}, '''b''': 2}
__magic_name__ = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
__magic_name__ = {}
__magic_name__ = []
__magic_name__ = 2
__magic_name__ = [2, 3]
__magic_name__ = {'''a''': 2, '''b''': 3}
__magic_name__ = {'''a''': [2, 3], '''b''': [4, 5]}
__magic_name__ = {'''a''': {'''1''': 2}, '''b''': 3}
__magic_name__ = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
self.assertEqual(map_nested(a__ , a__ ) , a__ )
self.assertEqual(map_nested(a__ , a__ ) , a__ )
self.assertEqual(map_nested(a__ , a__ ) , a__ )
self.assertEqual(map_nested(a__ , a__ ) , a__ )
self.assertEqual(map_nested(a__ , a__ ) , a__ )
self.assertEqual(map_nested(a__ , a__ ) , a__ )
self.assertEqual(map_nested(a__ , a__ ) , a__ )
self.assertEqual(map_nested(a__ , a__ ) , a__ )
__magic_name__ = 2
self.assertEqual(map_nested(a__ , a__ , num_proc=a__ ) , a__ )
self.assertEqual(map_nested(a__ , a__ , num_proc=a__ ) , a__ )
self.assertEqual(map_nested(a__ , a__ , num_proc=a__ ) , a__ )
self.assertEqual(map_nested(a__ , a__ , num_proc=a__ ) , a__ )
self.assertEqual(map_nested(a__ , a__ , num_proc=a__ ) , a__ )
self.assertEqual(map_nested(a__ , a__ , num_proc=a__ ) , a__ )
self.assertEqual(map_nested(a__ , a__ , num_proc=a__ ) , a__ )
self.assertEqual(map_nested(a__ , a__ , num_proc=a__ ) , a__ )
__magic_name__ = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
__magic_name__ = {'''a''': 2, '''b''': 0, '''c''': 2}
__magic_name__ = {
'''a''': np.eye(2 ).astype(a__ ),
'''b''': np.zeros(3 ).astype(a__ ),
'''c''': np.ones(2 ).astype(a__ ),
}
self.assertEqual(map_nested(a__ , a__ , map_numpy=a__ ) , a__ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(a__ , a__ , map_numpy=a__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(a__ , a__ , map_numpy=a__ , num_proc=a__ ) , a__ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(a__ , a__ , map_numpy=a__ , num_proc=a__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(a__ ): # can't pickle a local lambda
map_nested(lambda a__ : x + 1 , a__ , num_proc=a__ )
def snake_case__ ( self : int ):
__magic_name__ = {'''a''': 1, '''b''': 2}
__magic_name__ = {'''a''': 3, '''b''': 4}
__magic_name__ = {'''a''': 5, '''b''': 6}
__magic_name__ = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(a__ , a__ , a__ ) ) , a__ )
def snake_case__ ( self : Tuple ):
class _SCREAMING_SNAKE_CASE :
__SCREAMING_SNAKE_CASE :int = """bar"""
__magic_name__ = Foo()
self.assertEqual(foo.my_attr , '''bar''' )
with temporary_assignment(a__ , '''my_attr''' , '''BAR''' ):
self.assertEqual(foo.my_attr , '''BAR''' )
self.assertEqual(foo.my_attr , '''bar''' )
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def UpperCamelCase ( a , a , a ) -> str:
'''simple docstring'''
with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
'''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
__magic_name__ = {F'''{i}''': i for i in range(a )}
__magic_name__ = map_nested(lambda a : x + 10 , a , num_proc=a , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _SCREAMING_SNAKE_CASE ( __a ):
@require_tf
def snake_case__ ( self : Optional[Any] ):
import tensorflow as tf
from tensorflow.keras import layers
__magic_name__ = layers.Dense(2 )
def gen_random_output():
__magic_name__ = tf.random.uniform((1, 3) )
return model(a__ ).numpy()
with temp_seed(42 , set_tensorflow=a__ ):
__magic_name__ = gen_random_output()
with temp_seed(42 , set_tensorflow=a__ ):
__magic_name__ = gen_random_output()
__magic_name__ = gen_random_output()
np.testing.assert_equal(a__ , a__ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def snake_case__ ( self : Optional[int] ):
import torch
def gen_random_output():
__magic_name__ = torch.nn.Linear(3 , 2 )
__magic_name__ = torch.rand(1 , 3 )
return model(a__ ).detach().numpy()
with temp_seed(42 , set_pytorch=a__ ):
__magic_name__ = gen_random_output()
with temp_seed(42 , set_pytorch=a__ ):
__magic_name__ = gen_random_output()
__magic_name__ = gen_random_output()
np.testing.assert_equal(a__ , a__ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def snake_case__ ( self : Optional[Any] ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
__magic_name__ = gen_random_output()
with temp_seed(42 ):
__magic_name__ = gen_random_output()
__magic_name__ = gen_random_output()
np.testing.assert_equal(a__ , a__ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize('''input_data''' , [{}] )
def UpperCamelCase ( a ) -> str:
'''simple docstring'''
__magic_name__ = NestedDataStructure(a ).data
assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''' , [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
] , )
def UpperCamelCase ( a , a ) -> Dict:
'''simple docstring'''
__magic_name__ = NestedDataStructure(a ).flatten()
assert output == expected_output
def UpperCamelCase ( ) -> List[Any]:
'''simple docstring'''
__magic_name__ = A(x=1 , y='''foobar''' )
__magic_name__ = {'''x''': 1, '''y''': '''foobar'''}
assert asdict(a ) == expected_output
__magic_name__ = {'''a''': {'''b''': A(x=10 , y='''foo''' )}, '''c''': [A(x=20 , y='''bar''' )]}
__magic_name__ = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
assert asdict(a ) == expected_output
with pytest.raises(a ):
asdict([1, A(x=10 , y='''foo''' )] )
def UpperCamelCase ( a ) -> Optional[Any]:
'''simple docstring'''
return text.split()
def UpperCamelCase ( a ) -> Any:
'''simple docstring'''
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def UpperCamelCase ( ) -> List[str]:
'''simple docstring'''
with Pool(2 ) as pool:
__magic_name__ = list(iflatmap_unordered(a , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(a ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
__magic_name__ = list(iflatmap_unordered(a , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(a ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
__magic_name__ = []
for yield_time, content in iflatmap_unordered(
a , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(a )
assert out.count('''a''' ) == 2
assert out.count('''b''' ) == 2
assert len(a ) == 4
| 350 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( __a ,__a ,__a ,unittest.TestCase ):
__SCREAMING_SNAKE_CASE :Union[str, Any] = StableDiffusionInpaintPipeline
__SCREAMING_SNAKE_CASE :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__SCREAMING_SNAKE_CASE :Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__SCREAMING_SNAKE_CASE :str = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE :Optional[Any] = frozenset([] )
def snake_case__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
__magic_name__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
__magic_name__ = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
__magic_name__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__magic_name__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
__magic_name__ = CLIPTextModel(a__ )
__magic_name__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__magic_name__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case__ ( self : Any , a__ : Optional[int] , a__ : List[Any]=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
__magic_name__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
__magic_name__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__ = Image.fromarray(np.uinta(a__ ) ).convert('''RGB''' ).resize((64, 64) )
__magic_name__ = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(a__ ).startswith('''mps''' ):
__magic_name__ = torch.manual_seed(a__ )
else:
__magic_name__ = torch.Generator(device=a__ ).manual_seed(a__ )
__magic_name__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self : Union[str, Any] ):
__magic_name__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__magic_name__ = self.get_dummy_components()
__magic_name__ = StableDiffusionInpaintPipeline(**a__ )
__magic_name__ = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
__magic_name__ = self.get_dummy_inputs(a__ )
__magic_name__ = sd_pipe(**a__ ).images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case__ ( self : List[Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def snake_case__ ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : Tuple ):
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__magic_name__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
__magic_name__ = '''stabilityai/stable-diffusion-2-inpainting'''
__magic_name__ = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
__magic_name__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type='''np''' , )
__magic_name__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def snake_case__ ( self : List[str] ):
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__magic_name__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
__magic_name__ = '''stabilityai/stable-diffusion-2-inpainting'''
__magic_name__ = StableDiffusionInpaintPipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , safety_checker=a__ , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
__magic_name__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type='''np''' , )
__magic_name__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def snake_case__ ( self : List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__magic_name__ = '''stabilityai/stable-diffusion-2-inpainting'''
__magic_name__ = PNDMScheduler.from_pretrained(a__ , subfolder='''scheduler''' )
__magic_name__ = StableDiffusionInpaintPipeline.from_pretrained(
a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__magic_name__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type='''np''' , )
__magic_name__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 98 | 0 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
UpperCAmelCase_ : Tuple = re.compile("""[^A-Za-z_0-9]""")
# parameters used in DuplicationIndex
UpperCAmelCase_ : List[Any] = 10
UpperCAmelCase_ : List[str] = 256
def _A (__a ) -> Optional[MinHash]:
"""simple docstring"""
if len(__a ) < MIN_NUM_TOKENS:
return None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MinHash(num_perm=__a )
for token in set(__a ):
min_hash.update(token.encode() )
return min_hash
def _A (__a ) -> Set[str]:
"""simple docstring"""
return {t for t in NON_ALPHA.split(__a ) if len(t.strip() ) > 0}
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Dict , *,
lowercase_ : float = 0.85 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = duplication_jaccard_threshold
SCREAMING_SNAKE_CASE_ : Any = NUM_PERM
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm)
SCREAMING_SNAKE_CASE_ : List[Any] = defaultdict(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Tuple , lowercase_ : MinHash):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._index.query(lowercase_)
if code_key in self._index.keys:
print(F'Duplicate key {code_key}')
return
self._index.insert(lowercase_ , lowercase_)
if len(lowercase_) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowercase_)
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = []
for base, duplicates in self._duplicate_clusters.items():
SCREAMING_SNAKE_CASE_ : Tuple = [base] + list(lowercase_)
# reformat the cluster to be a list of dict
SCREAMING_SNAKE_CASE_ : Optional[int] = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(lowercase_)
return duplicate_clusters
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.get_duplicate_clusters()
with open(lowercase_ , '''w''') as f:
json.dump(lowercase_ , lowercase_)
def _A (__a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = element
SCREAMING_SNAKE_CASE_ : Dict = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def _A (__a ) -> List[Any]:
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(__a , max_queue_size=1_00_00 ) , chunksize=1_00 , ):
if data is not None:
yield data
def _A (__a , __a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = DuplicationIndex(duplication_jaccard_threshold=__a )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(__a ) ) , max_queue_size=1_00 ) ):
di.add(__a , __a )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def _A (__a , __a ) -> float:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = get_tokens(__a )
SCREAMING_SNAKE_CASE_ : Any = get_tokens(__a )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
UpperCAmelCase_ : List[str] = None
def _A (__a , __a ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = []
for elementa in cluster:
SCREAMING_SNAKE_CASE_ : Any = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(__a , __a ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = 1
extremes.append(__a )
return extremes
def _A (__a , __a , __a ) -> List[str]:
"""simple docstring"""
global _shared_dataset
SCREAMING_SNAKE_CASE_ : Any = dataset
SCREAMING_SNAKE_CASE_ : List[Any] = []
SCREAMING_SNAKE_CASE_ : str = partial(_find_cluster_extremes_shared , jaccard_threshold=__a )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
__a , __a , ) , total=len(__a ) , ):
extremes_list.append(__a )
return extremes_list
def _A (__a , __a = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = make_duplicate_clusters(__a , __a )
SCREAMING_SNAKE_CASE_ : List[Any] = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
SCREAMING_SNAKE_CASE_ : int = find_extremes(__a , __a , __a )
for extremes in extremes_clusters:
for element in extremes:
SCREAMING_SNAKE_CASE_ : int = element
SCREAMING_SNAKE_CASE_ : Optional[int] = duplicate_indices - set(extreme_dict.keys() )
SCREAMING_SNAKE_CASE_ : Any = dataset.filter(lambda __a , __a : idx not in remove_indices , with_indices=__a )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
SCREAMING_SNAKE_CASE_ : Dict = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = extreme_dict[element['''base_index''']]['''copies''']
print(f'Original dataset size: {len(__a )}' )
print(f'Number of duplicate clusters: {len(__a )}' )
print(f'Files in duplicate cluster: {len(__a )}' )
print(f'Unique files in duplicate cluster: {len(__a )}' )
print(f'Filtered dataset size: {len(__a )}' )
return ds_filter, duplicate_clusters
| 91 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
UpperCAmelCase_ : int = logging.get_logger(__name__)
def _A (__a ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(__a , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__a , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__a ):
return [[videos]]
raise ValueError(f'Could not make batched video from {videos}' )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = ["pixel_values"]
def __init__( self : Dict , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : Dict , ):
'''simple docstring'''
super().__init__(**lowercase_)
SCREAMING_SNAKE_CASE_ : str = size if size is not None else {'''shortest_edge''': 256}
SCREAMING_SNAKE_CASE_ : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(lowercase_ , param_name='''crop_size''')
SCREAMING_SNAKE_CASE_ : Optional[int] = do_resize
SCREAMING_SNAKE_CASE_ : List[Any] = size
SCREAMING_SNAKE_CASE_ : Tuple = do_center_crop
SCREAMING_SNAKE_CASE_ : Dict = crop_size
SCREAMING_SNAKE_CASE_ : List[Any] = resample
SCREAMING_SNAKE_CASE_ : List[str] = do_rescale
SCREAMING_SNAKE_CASE_ : List[str] = rescale_factor
SCREAMING_SNAKE_CASE_ : List[Any] = offset
SCREAMING_SNAKE_CASE_ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE_ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE_ : List[Any] = get_resize_output_image_size(lowercase_ , size['''shortest_edge'''] , default_to_square=lowercase_)
elif "height" in size and "width" in size:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}')
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}')
return center_crop(lowercase_ , size=(size['''height'''], size['''width''']) , data_format=lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : bool = True , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[int] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = image.astype(np.floataa)
if offset:
SCREAMING_SNAKE_CASE_ : Tuple = image - (scale / 2)
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[str] , ):
'''simple docstring'''
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
'''simple docstring'''
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''')
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : List[str] = to_numpy_array(lowercase_)
if do_resize:
SCREAMING_SNAKE_CASE_ : List[Any] = self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_)
if do_center_crop:
SCREAMING_SNAKE_CASE_ : Dict = self.center_crop(lowercase_ , size=lowercase_)
if do_rescale:
SCREAMING_SNAKE_CASE_ : int = self.rescale(image=lowercase_ , scale=lowercase_ , offset=lowercase_)
if do_normalize:
SCREAMING_SNAKE_CASE_ : Dict = self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = to_channel_dimension_format(lowercase_ , lowercase_)
return image
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : int = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ : Dict = offset if offset is not None else self.offset
SCREAMING_SNAKE_CASE_ : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : Dict = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ : List[str] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(lowercase_ , param_name='''crop_size''')
if not valid_images(lowercase_):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
SCREAMING_SNAKE_CASE_ : Tuple = make_batched(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
[
self._preprocess_image(
image=lowercase_ , do_resize=lowercase_ , size=lowercase_ , resample=lowercase_ , do_center_crop=lowercase_ , crop_size=lowercase_ , do_rescale=lowercase_ , rescale_factor=lowercase_ , offset=lowercase_ , do_normalize=lowercase_ , image_mean=lowercase_ , image_std=lowercase_ , data_format=lowercase_ , )
for img in video
]
for video in videos
]
SCREAMING_SNAKE_CASE_ : int = {'''pixel_values''': videos}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
| 91 | 1 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
A: Optional[Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def _snake_case ( UpperCamelCase : Tuple , UpperCamelCase : tuple , UpperCamelCase : Path , UpperCamelCase : Tuple , UpperCamelCase : List[str] , UpperCamelCase : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict=False , ):
output_path.parent.mkdir(parents=UpperCamelCase , exist_ok=UpperCamelCase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
UpperCamelCase , UpperCamelCase , f=output_path.as_posix() , input_names=UpperCamelCase , output_names=UpperCamelCase , dynamic_axes=UpperCamelCase , do_constant_folding=UpperCamelCase , use_external_data_format=UpperCamelCase , enable_onnx_checker=UpperCamelCase , opset_version=UpperCamelCase , )
else:
export(
UpperCamelCase , UpperCamelCase , f=output_path.as_posix() , input_names=UpperCamelCase , output_names=UpperCamelCase , dynamic_axes=UpperCamelCase , do_constant_folding=UpperCamelCase , opset_version=UpperCamelCase , )
@torch.no_grad()
def _snake_case ( UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : bool = False ):
UpperCAmelCase : str = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
UpperCAmelCase : List[Any] = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
UpperCAmelCase : Optional[int] = """cpu"""
UpperCAmelCase : Dict = StableDiffusionPipeline.from_pretrained(UpperCamelCase , torch_dtype=UpperCamelCase ).to(UpperCamelCase )
UpperCAmelCase : List[Any] = Path(UpperCamelCase )
# TEXT ENCODER
UpperCAmelCase : Dict = pipeline.text_encoder.config.max_position_embeddings
UpperCAmelCase : List[Any] = pipeline.text_encoder.config.hidden_size
UpperCAmelCase : List[Any] = pipeline.tokenizer(
"""A sample prompt""" , padding="""max_length""" , max_length=pipeline.tokenizer.model_max_length , truncation=UpperCamelCase , return_tensors="""pt""" , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=UpperCamelCase , dtype=torch.intaa )) , output_path=output_path / """text_encoder""" / """model.onnx""" , ordered_input_names=["""input_ids"""] , output_names=["""last_hidden_state""", """pooler_output"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """sequence"""},
} , opset=UpperCamelCase , )
del pipeline.text_encoder
# UNET
UpperCAmelCase : str = pipeline.unet.config.in_channels
UpperCAmelCase : List[str] = pipeline.unet.config.sample_size
UpperCAmelCase : str = output_path / """unet""" / """model.onnx"""
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , UpperCamelCase , UpperCamelCase , UpperCamelCase ).to(device=UpperCamelCase , dtype=UpperCamelCase ),
torch.randn(2 ).to(device=UpperCamelCase , dtype=UpperCamelCase ),
torch.randn(2 , UpperCamelCase , UpperCamelCase ).to(device=UpperCamelCase , dtype=UpperCamelCase ),
False,
) , output_path=UpperCamelCase , ordered_input_names=["""sample""", """timestep""", """encoder_hidden_states""", """return_dict"""] , output_names=["""out_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""timestep""": {0: """batch"""},
"""encoder_hidden_states""": {0: """batch""", 1: """sequence"""},
} , opset=UpperCamelCase , use_external_data_format=UpperCamelCase , )
UpperCAmelCase : str = str(unet_path.absolute().as_posix() )
UpperCAmelCase : Optional[int] = os.path.dirname(UpperCamelCase )
UpperCAmelCase : Tuple = onnx.load(UpperCamelCase )
# clean up existing tensor files
shutil.rmtree(UpperCamelCase )
os.mkdir(UpperCamelCase )
# collate external tensor files into one
onnx.save_model(
UpperCamelCase , UpperCamelCase , save_as_external_data=UpperCamelCase , all_tensors_to_one_file=UpperCamelCase , location="""weights.pb""" , convert_attribute=UpperCamelCase , )
del pipeline.unet
# VAE ENCODER
UpperCAmelCase : Optional[Any] = pipeline.vae
UpperCAmelCase : Optional[int] = vae_encoder.config.in_channels
UpperCAmelCase : List[Any] = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
UpperCAmelCase : Union[str, Any] = lambda UpperCamelCase , UpperCamelCase : vae_encoder.encode(UpperCamelCase , UpperCamelCase )[0].sample()
onnx_export(
UpperCamelCase , model_args=(
torch.randn(1 , UpperCamelCase , UpperCamelCase , UpperCamelCase ).to(device=UpperCamelCase , dtype=UpperCamelCase ),
False,
) , output_path=output_path / """vae_encoder""" / """model.onnx""" , ordered_input_names=["""sample""", """return_dict"""] , output_names=["""latent_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=UpperCamelCase , )
# VAE DECODER
UpperCAmelCase : List[str] = pipeline.vae
UpperCAmelCase : str = vae_decoder.config.latent_channels
UpperCAmelCase : Any = vae_decoder.config.out_channels
# forward only through the decoder part
UpperCAmelCase : List[str] = vae_encoder.decode
onnx_export(
UpperCamelCase , model_args=(
torch.randn(1 , UpperCamelCase , UpperCamelCase , UpperCamelCase ).to(device=UpperCamelCase , dtype=UpperCamelCase ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=UpperCamelCase , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
UpperCAmelCase : Optional[Any] = pipeline.safety_checker
UpperCAmelCase : Tuple = safety_checker.config.vision_config.num_channels
UpperCAmelCase : int = safety_checker.config.vision_config.image_size
UpperCAmelCase : int = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , UpperCamelCase , UpperCamelCase , UpperCamelCase , ).to(device=UpperCamelCase , dtype=UpperCamelCase ),
torch.randn(1 , UpperCamelCase , UpperCamelCase , UpperCamelCase ).to(device=UpperCamelCase , dtype=UpperCamelCase ),
) , output_path=output_path / """safety_checker""" / """model.onnx""" , ordered_input_names=["""clip_input""", """images"""] , output_names=["""out_images""", """has_nsfw_concepts"""] , dynamic_axes={
"""clip_input""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""images""": {0: """batch""", 1: """height""", 2: """width""", 3: """channels"""},
} , opset=UpperCamelCase , )
del pipeline.safety_checker
UpperCAmelCase : str = OnnxRuntimeModel.from_pretrained(output_path / """safety_checker""" )
UpperCAmelCase : Optional[int] = pipeline.feature_extractor
else:
UpperCAmelCase : Any = None
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : Dict = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_encoder""" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_decoder""" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / """text_encoder""" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / """unet""" ) , scheduler=pipeline.scheduler , safety_checker=UpperCamelCase , feature_extractor=UpperCamelCase , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(UpperCamelCase )
print("""ONNX pipeline saved to""" , UpperCamelCase )
del pipeline
del onnx_pipeline
UpperCAmelCase : int = OnnxStableDiffusionPipeline.from_pretrained(UpperCamelCase , provider="""CPUExecutionProvider""" )
print("""ONNX pipeline is loadable""" )
if __name__ == "__main__":
A: Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=1_4,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
A: int = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
| 362 |
"""simple docstring"""
import math
import sys
def _snake_case ( UpperCamelCase : str ):
UpperCAmelCase : Dict = """"""
try:
with open(UpperCamelCase , """rb""" ) as binary_file:
UpperCAmelCase : str = binary_file.read()
for dat in data:
UpperCAmelCase : List[Any] = F"{dat:08b}"
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def _snake_case ( UpperCamelCase : str ):
UpperCAmelCase : Optional[int] = {"""0""": """0""", """1""": """1"""}
UpperCAmelCase , UpperCAmelCase : Optional[int] = """""", """"""
UpperCAmelCase : int = len(UpperCamelCase )
for i in range(len(UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCAmelCase : Any = lexicon[curr_string]
result += last_match_id
UpperCAmelCase : Any = last_match_id + """0"""
if math.loga(UpperCamelCase ).is_integer():
UpperCAmelCase : Optional[Any] = {}
for curr_key in list(UpperCamelCase ):
UpperCAmelCase : Dict = lexicon.pop(UpperCamelCase )
UpperCAmelCase : int = new_lex
UpperCAmelCase : int = last_match_id + """1"""
index += 1
UpperCAmelCase : List[str] = """"""
return result
def _snake_case ( UpperCamelCase : str , UpperCamelCase : str ):
UpperCAmelCase : Dict = 8
try:
with open(UpperCamelCase , """wb""" ) as opened_file:
UpperCAmelCase : Union[str, Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(UpperCamelCase ) , UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(UpperCamelCase , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def _snake_case ( UpperCamelCase : str ):
UpperCAmelCase : Any = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCAmelCase : List[str] = data_bits[counter:]
UpperCAmelCase : Tuple = data_bits[counter + 1 :]
return data_bits
def _snake_case ( UpperCamelCase : str , UpperCamelCase : str ):
UpperCAmelCase : int = read_file_binary(UpperCamelCase )
UpperCAmelCase : str = remove_prefix(UpperCamelCase )
UpperCAmelCase : Any = decompress_data(UpperCamelCase )
write_file_binary(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 76 | 0 |
def lowerCamelCase__ ( snake_case_ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
__snake_case = set()
# Replace all the whitespace in our sentence
__snake_case = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(snake_case_ ) == 26
def lowerCamelCase__ ( snake_case_ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
__snake_case = [False] * 26
for char in input_str:
if char.islower():
__snake_case = True
elif char.isupper():
__snake_case = True
return all(snake_case_ )
def lowerCamelCase__ ( snake_case_ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def lowerCamelCase__ ( ) -> None:
from timeit import timeit
__snake_case = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=snake_case_ ) )
print(timeit('''is_pangram_faster()''' , setup=snake_case_ ) )
print(timeit('''is_pangram_fastest()''' , setup=snake_case_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 24 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : str = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = XLMRobertaTokenizer
SCREAMING_SNAKE_CASE_ : int = XLMRobertaTokenizerFast
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
def __UpperCAmelCase ( self : int ) -> List[str]:
super().setUp()
# We have a SentencePiece fixture for testing
a = XLMRobertaTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : List[str] ) -> Any:
a = "<pad>"
a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(__lowerCamelCase ) , 10_02 )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = XLMRobertaTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
a = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
a = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
a = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = tempfile.mkdtemp()
a = tokenizer_r.save_pretrained(__lowerCamelCase )
a = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
a = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase )
# Checks everything loads correctly in the same way
a = tokenizer_r.from_pretrained(__lowerCamelCase )
a = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=True
a = tempfile.mkdtemp()
a = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase )
a = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase )
# Checks everything loads correctly in the same way
a = tokenizer_r.from_pretrained(__lowerCamelCase )
a = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=False
a = tempfile.mkdtemp()
a = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase )
a = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
a = tokenizer_r.from_pretrained(__lowerCamelCase )
a = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
@cached_property
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__lowerCamelCase , f.name )
a = XLMRobertaTokenizer(f.name , keep_accents=__lowerCamelCase )
a = pickle.dumps(__lowerCamelCase )
pickle.loads(__lowerCamelCase )
def __UpperCAmelCase ( self : int ) -> str:
if not self.test_rust_tokenizer:
return
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = "I was born in 92000, and this is falsé."
a = tokenizer.tokenize(__lowerCamelCase )
a = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = self.get_rust_tokenizer()
a = tokenizer.encode(__lowerCamelCase )
a = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@slow
def __UpperCAmelCase ( self : Dict ) -> Any:
a = "Hello World!"
a = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCamelCase , self.big_tokenizer.encode(__lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Tuple ) -> int:
a = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
a = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCamelCase , self.big_tokenizer.encode(__lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
# fmt: off
a = {"input_ids": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
| 107 | 0 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
a_ : Optional[Any] = TypeVar("T")
def _A (lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (position - 1) // 2
def _A (lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (2 * position) + 1
def _A (lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (2 * position) + 2
class a ( Generic[T] ):
def __init__( self ) -> None:
_a = []
_a = {}
_a = 0
def __len__( self ) -> int:
return self.elements
def __repr__( self ) -> str:
return str(self.heap )
def __UpperCAmelCase ( self ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
_a = self.elements
self.elements += 1
self._bubble_up(__magic_name__ )
def __UpperCAmelCase ( self ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
_a , _a = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
_a , _a = self.heap[0]
self._bubble_down(__magic_name__ )
return elem
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> None:
# Update the weight of the given key
_a = self.position_map[elem]
_a = (elem, weight)
if position > 0:
_a = get_parent_position(__magic_name__ )
_a , _a = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__magic_name__ )
else:
self._bubble_down(__magic_name__ )
else:
self._bubble_down(__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
_a = self.position_map[elem]
if curr_pos == 0:
return None
_a = get_parent_position(__magic_name__ )
_a , _a = self.heap[curr_pos]
_a , _a = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_up(__magic_name__ )
return None
def __UpperCAmelCase ( self , __magic_name__ ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
_a = self.position_map[elem]
_a , _a = self.heap[curr_pos]
_a = get_child_left_position(__magic_name__ )
_a = get_child_right_position(__magic_name__ )
if child_left_position < self.elements and child_right_position < self.elements:
_a , _a = self.heap[child_left_position]
_a , _a = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_down(__magic_name__ )
if child_left_position < self.elements:
_a , _a = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_down(__magic_name__ )
else:
return None
if child_right_position < self.elements:
_a , _a = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_down(__magic_name__ )
return None
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> None:
# Swap the nodes at the given positions
_a = self.heap[nodea_pos][0]
_a = self.heap[nodea_pos][0]
_a , _a = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
_a = nodea_pos
_a = nodea_pos
class a ( Generic[T] ):
def __init__( self ) -> None:
_a = {}
_a = 0
def __repr__( self ) -> str:
return str(self.connections )
def __len__( self ) -> int:
return self.nodes
def __UpperCAmelCase ( self , __magic_name__ ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
_a = {}
self.nodes += 1
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(__magic_name__ )
self.add_node(__magic_name__ )
_a = weight
_a = weight
def _A (lowerCAmelCase__ :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
'''simple docstring'''
_a = {node: maxsize for node in graph.connections}
_a = {node: None for node in graph.connections}
_a = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowerCAmelCase__ , lowerCAmelCase__ )
if priority_queue.is_empty():
return dist, parent
# initialization
_a = priority_queue.extract_min()
_a = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_a = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase__ , dist[neighbour] )
_a = node
# running prim's algorithm
while not priority_queue.is_empty():
_a = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_a = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase__ , dist[neighbour] )
_a = node
return dist, parent
| 104 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
a_ : Tuple = get_tests_dir("fixtures")
class a ( unittest.TestCase ):
def __UpperCAmelCase ( self ) -> str:
# A mock response for an HTTP head request to emulate server down
_a = mock.Mock()
_a = 5_00
_a = {}
_a = HTTPError
_a = {}
# Download this model to make sure it's in the cache.
_a = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__magic_name__ ) as mock_head:
_a = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
# This check we did call the fake head request
mock_head.assert_called()
def __UpperCAmelCase ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
_a = ViTImageProcessor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' )
def __UpperCAmelCase ( self ) -> Dict:
with self.assertRaises(__magic_name__ ):
# config is in subfolder, the following should not work without specifying the subfolder
_a = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' )
_a = AutoImageProcessor.from_pretrained(
'hf-internal-testing/stable-diffusion-all-variants' , subfolder='feature_extractor' )
self.assertIsNotNone(__magic_name__ )
@is_staging_test
class a ( unittest.TestCase ):
@classmethod
def __UpperCAmelCase ( cls ) -> Dict:
_a = TOKEN
HfFolder.save_token(__magic_name__ )
@classmethod
def __UpperCAmelCase ( cls ) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id='test-image-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-image-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-image-processor' )
except HTTPError:
pass
def __UpperCAmelCase ( self ) -> str:
_a = ViTImageProcessor.from_pretrained(__magic_name__ )
image_processor.push_to_hub('test-image-processor' , use_auth_token=self._token )
_a = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__magic_name__ , repo_id='test-image-processor' , push_to_hub=__magic_name__ , use_auth_token=self._token )
_a = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = ViTImageProcessor.from_pretrained(__magic_name__ )
image_processor.push_to_hub('valid_org/test-image-processor' , use_auth_token=self._token )
_a = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__magic_name__ , repo_id='valid_org/test-image-processor-org' , push_to_hub=__magic_name__ , use_auth_token=self._token )
_a = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def __UpperCAmelCase ( self ) -> Any:
CustomImageProcessor.register_for_auto_class()
_a = CustomImageProcessor.from_pretrained(__magic_name__ )
image_processor.push_to_hub('test-dynamic-image-processor' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'} , )
_a = AutoImageProcessor.from_pretrained(
f'{USER}/test-dynamic-image-processor' , trust_remote_code=__magic_name__ )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , 'CustomImageProcessor' )
| 104 | 1 |
from ..utils import DummyObject, requires_backends
class lowercase_ ( metaclass=lowercase ):
'''simple docstring'''
__snake_case = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Dict , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : Tuple ) ->Dict:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : List[Any] ) ->Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *__UpperCAmelCase : Any , **__UpperCAmelCase : int ) ->Tuple:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowercase_ ( metaclass=lowercase ):
'''simple docstring'''
__snake_case = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : List[str] , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : List[Any] ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : List[Any] , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : Optional[int] ) ->Any:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : Optional[Any] ) ->Tuple:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowercase_ ( metaclass=lowercase ):
'''simple docstring'''
__snake_case = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : str , *__UpperCAmelCase : str , **__UpperCAmelCase : Any ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : int ) ->Tuple:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : List[str] ) ->List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowercase_ ( metaclass=lowercase ):
'''simple docstring'''
__snake_case = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : List[Any] , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : Dict ) ->Tuple:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : int , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : int ) ->Dict:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : Optional[int] , *__UpperCAmelCase : Any , **__UpperCAmelCase : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowercase_ ( metaclass=lowercase ):
'''simple docstring'''
__snake_case = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Union[str, Any] , *__UpperCAmelCase : Any , **__UpperCAmelCase : Tuple ) ->Dict:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , *__UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : Any ) ->str:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : str , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : Dict ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowercase_ ( metaclass=lowercase ):
'''simple docstring'''
__snake_case = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : int , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Optional[Any] ) ->Any:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : Dict ) ->Any:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *__UpperCAmelCase : str , **__UpperCAmelCase : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
| 0 | import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a__ : str = TextToVideoSDPipeline
a__ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
a__ : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
a__ : int = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def UpperCamelCase__ ( self) -> Optional[Any]:
torch.manual_seed(0)
__UpperCamelCase :str = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
__UpperCamelCase :Optional[int] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__lowercase , set_alpha_to_one=__lowercase , )
torch.manual_seed(0)
__UpperCamelCase :Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
__UpperCamelCase :Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
__UpperCamelCase :Optional[Any] = CLIPTextModel(__lowercase)
__UpperCamelCase :Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
__UpperCamelCase :Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def UpperCamelCase__ ( self , __lowercase , __lowercase=0) -> Optional[int]:
if str(__lowercase).startswith('''mps'''):
__UpperCamelCase :List[Any] = torch.manual_seed(__lowercase)
else:
__UpperCamelCase :Tuple = torch.Generator(device=__lowercase).manual_seed(__lowercase)
__UpperCamelCase :Dict = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase :Optional[int] = self.get_dummy_components()
__UpperCamelCase :Dict = TextToVideoSDPipeline(**__lowercase)
__UpperCamelCase :Any = sd_pipe.to(__lowercase)
sd_pipe.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :Optional[Any] = self.get_dummy_inputs(__lowercase)
__UpperCamelCase :int = '''np'''
__UpperCamelCase :List[str] = sd_pipe(**__lowercase).frames
__UpperCamelCase :Optional[Any] = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
__UpperCamelCase :str = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase__ ( self) -> Tuple:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__lowercase , expected_max_diff=3E-3)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCamelCase__ ( self) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowercase , expected_max_diff=1E-2)
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''')
def UpperCamelCase__ ( self) -> Union[str, Any]:
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''')
def UpperCamelCase__ ( self) -> Dict:
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''')
def UpperCamelCase__ ( self) -> str:
pass
def UpperCamelCase__ ( self) -> List[str]:
return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''')
__UpperCamelCase :List[str] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''')
__UpperCamelCase :Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__UpperCamelCase :str = pipe.to('''cuda''')
__UpperCamelCase :Optional[Any] = '''Spiderman is surfing'''
__UpperCamelCase :Union[str, Any] = torch.Generator(device='''cpu''').manual_seed(0)
__UpperCamelCase :List[Any] = pipe(__lowercase , generator=__lowercase , num_inference_steps=25 , output_type='''pt''').frames
__UpperCamelCase :Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5E-2
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''')
__UpperCamelCase :Union[str, Any] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''')
__UpperCamelCase :str = pipe.to('''cuda''')
__UpperCamelCase :Union[str, Any] = '''Spiderman is surfing'''
__UpperCamelCase :int = torch.Generator(device='''cpu''').manual_seed(0)
__UpperCamelCase :List[Any] = pipe(__lowercase , generator=__lowercase , num_inference_steps=2 , output_type='''pt''').frames
__UpperCamelCase :Optional[Any] = video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5E-2
| 43 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase_ : List[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _UpperCamelCase ( _snake_case ):
'''simple docstring'''
__UpperCamelCase : Any = ["""pixel_values"""]
def __init__( self : Tuple , snake_case_ : int = True , snake_case_ : int = None , snake_case_ : Optional[int] = PILImageResampling.BICUBIC , snake_case_ : Optional[Any] = True , snake_case_ : int = None , snake_case_ : str = True , snake_case_ : int = 1 / 255 , snake_case_ : Union[str, Any] = True , snake_case_ : int = None , snake_case_ : Dict = None , snake_case_ : Any = True , **snake_case_ : Dict , ):
super().__init__(**UpperCamelCase__ )
UpperCamelCase_: Tuple = size if size is not None else {"""shortest_edge""": 224}
UpperCamelCase_: Optional[int] = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
UpperCamelCase_: Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
UpperCamelCase_: Any = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ , param_name="""crop_size""" )
UpperCamelCase_: Tuple = do_resize
UpperCamelCase_: Union[str, Any] = size
UpperCamelCase_: Dict = resample
UpperCamelCase_: Optional[Any] = do_center_crop
UpperCamelCase_: Any = crop_size
UpperCamelCase_: List[Any] = do_rescale
UpperCamelCase_: Optional[int] = rescale_factor
UpperCamelCase_: str = do_normalize
UpperCamelCase_: Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCamelCase_: Optional[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCamelCase_: Union[str, Any] = do_convert_rgb
def lowerCAmelCase__ ( self : Dict , snake_case_ : str , snake_case_ : List[str] , snake_case_ : Optional[Any] = PILImageResampling.BICUBIC , snake_case_ : Tuple = None , **snake_case_ : Any , ):
UpperCamelCase_: List[Any] = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
UpperCamelCase_: Tuple = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : List[str] = None , **snake_case_ : str , ):
UpperCamelCase_: Union[str, Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def lowerCAmelCase__ ( self : Dict , snake_case_ : List[str] , snake_case_ : Union[str, Any] , snake_case_ : Any = None , **snake_case_ : int , ):
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def lowerCAmelCase__ ( self : int , snake_case_ : int , snake_case_ : int , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] = None , **snake_case_ : str , ):
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , snake_case_ : Tuple , snake_case_ : Union[str, Any] = None , snake_case_ : List[Any] = None , snake_case_ : Dict = None , snake_case_ : List[str] = None , snake_case_ : Union[str, Any] = None , snake_case_ : Tuple = None , snake_case_ : Optional[Any] = None , snake_case_ : List[str] = None , snake_case_ : List[Any] = None , snake_case_ : Union[str, Any] = None , snake_case_ : List[str] = None , snake_case_ : int = None , snake_case_ : str = ChannelDimension.FIRST , **snake_case_ : int , ):
UpperCamelCase_: List[str] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase_: Optional[int] = size if size is not None else self.size
UpperCamelCase_: str = get_size_dict(UpperCamelCase__ , param_name="""size""" , default_to_square=UpperCamelCase__ )
UpperCamelCase_: Dict = resample if resample is not None else self.resample
UpperCamelCase_: Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase_: Any = crop_size if crop_size is not None else self.crop_size
UpperCamelCase_: Any = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" , default_to_square=UpperCamelCase__ )
UpperCamelCase_: Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase_: Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase_: int = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_: str = image_mean if image_mean is not None else self.image_mean
UpperCamelCase_: str = image_std if image_std is not None else self.image_std
UpperCamelCase_: List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase_: Optional[int] = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase_: Dict = [convert_to_rgb(UpperCamelCase__ ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase_: str = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
UpperCamelCase_: str = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_center_crop:
UpperCamelCase_: Any = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
if do_rescale:
UpperCamelCase_: str = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
UpperCamelCase_: List[str] = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
UpperCamelCase_: Any = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
UpperCamelCase_: Optional[Any] = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
| 363 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCamelCase_ : Dict = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
lowerCamelCase_ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 223 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __A ( a , a , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : Tuple =StableDiffusionPanoramaPipeline
UpperCamelCase__ : Union[str, Any] =TEXT_TO_IMAGE_PARAMS
UpperCamelCase__ : Union[str, Any] =TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase__ : List[str] =TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS
def __lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__UpperCamelCase : str =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__UpperCamelCase : List[Any] =DDIMScheduler()
torch.manual_seed(0 )
__UpperCamelCase : str =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCamelCase : List[str] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__UpperCamelCase : Any =CLIPTextModel(lowerCamelCase__ )
__UpperCamelCase : List[Any] =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase : int ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
__UpperCamelCase : Dict =torch.manual_seed(lowerCamelCase__ )
__UpperCamelCase : Optional[int] ={
'prompt': 'a photo of the dolomites',
'generator': generator,
# Setting height and width to None to prevent OOMs on CPU.
'height': None,
'width': None,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : Any =self.get_dummy_components()
__UpperCamelCase : Tuple =StableDiffusionPanoramaPipeline(**lowerCamelCase__ )
__UpperCamelCase : List[str] =sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : str =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : Any =sd_pipe(**lowerCamelCase__ ).images
__UpperCamelCase : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase : Tuple =np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self ):
"""simple docstring"""
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowercase ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : Optional[Any] =self.get_dummy_components()
__UpperCamelCase : Union[str, Any] =StableDiffusionPanoramaPipeline(**lowerCamelCase__ )
__UpperCamelCase : List[str] =sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : str =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : int ='french fries'
__UpperCamelCase : Optional[Any] =sd_pipe(**lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
__UpperCamelCase : Dict =output.images
__UpperCamelCase : Any =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase : List[Any] =np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[Any] ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : Optional[Any] =self.get_dummy_components()
__UpperCamelCase : Dict =StableDiffusionPanoramaPipeline(**lowerCamelCase__ )
__UpperCamelCase : str =sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : str =sd_pipe(**lowerCamelCase__ , view_batch_size=2 )
__UpperCamelCase : Any =output.images
__UpperCamelCase : List[str] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase : List[str] =np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : Optional[int] =self.get_dummy_components()
__UpperCamelCase : List[str] =EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' )
__UpperCamelCase : Any =StableDiffusionPanoramaPipeline(**lowerCamelCase__ )
__UpperCamelCase : int =sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : List[str] =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : List[str] =sd_pipe(**lowerCamelCase__ ).images
__UpperCamelCase : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase : Optional[int] =np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : int =self.get_dummy_components()
__UpperCamelCase : Tuple =PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , skip_prk_steps=lowerCamelCase__ )
__UpperCamelCase : Dict =StableDiffusionPanoramaPipeline(**lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : List[Any] =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : List[str] =sd_pipe(**lowerCamelCase__ ).images
__UpperCamelCase : Dict =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase : Optional[Any] =np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self , lowerCamelCase__=0 ):
"""simple docstring"""
__UpperCamelCase : Tuple =torch.manual_seed(lowerCamelCase__ )
__UpperCamelCase : Dict ={
'prompt': 'a photo of the dolomites',
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[int] ='stabilityai/stable-diffusion-2-base'
__UpperCamelCase : Union[str, Any] =DDIMScheduler.from_pretrained(lowerCamelCase__ , subfolder='scheduler' )
__UpperCamelCase : Optional[Any] =StableDiffusionPanoramaPipeline.from_pretrained(lowerCamelCase__ , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
__UpperCamelCase : Optional[int] =self.get_inputs()
__UpperCamelCase : str =pipe(**lowerCamelCase__ ).images
__UpperCamelCase : str =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
__UpperCamelCase : Dict =np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Any =StableDiffusionPanoramaPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-base' , safety_checker=lowerCamelCase__ )
__UpperCamelCase : List[Any] =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
__UpperCamelCase : Tuple =self.get_inputs()
__UpperCamelCase : List[Any] =pipe(**lowerCamelCase__ ).images
__UpperCamelCase : str =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
__UpperCamelCase : Dict =np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Dict =0
def callback_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> None:
__UpperCamelCase : List[Any] =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__UpperCamelCase : Optional[Any] =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
__UpperCamelCase : Tuple =latents[0, -3:, -3:, -1]
__UpperCamelCase : Optional[Any] =np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__UpperCamelCase : List[Any] =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
__UpperCamelCase : Dict =latents[0, -3:, -3:, -1]
__UpperCamelCase : int =np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__UpperCamelCase : Any =False
__UpperCamelCase : int ='stabilityai/stable-diffusion-2-base'
__UpperCamelCase : Optional[int] =DDIMScheduler.from_pretrained(lowerCamelCase__ , subfolder='scheduler' )
__UpperCamelCase : int =StableDiffusionPanoramaPipeline.from_pretrained(lowerCamelCase__ , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ )
__UpperCamelCase : Optional[int] =pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
__UpperCamelCase : str =self.get_inputs()
pipe(**lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __lowercase ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCamelCase : Optional[int] ='stabilityai/stable-diffusion-2-base'
__UpperCamelCase : Optional[Any] =DDIMScheduler.from_pretrained(lowerCamelCase__ , subfolder='scheduler' )
__UpperCamelCase : str =StableDiffusionPanoramaPipeline.from_pretrained(lowerCamelCase__ , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ )
__UpperCamelCase : List[Any] =pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__UpperCamelCase : Optional[Any] =self.get_inputs()
__UpperCamelCase : List[str] =pipe(**lowerCamelCase__ )
__UpperCamelCase : Optional[int] =torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 71 |
from math import factorial
def UpperCAmelCase__ ( _A : int = 1_00 ):
'''simple docstring'''
return sum(int(_A ) for x in str(factorial(_A ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 188 | 0 |
import math
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int ) -> int:
"""simple docstring"""
if not isinstance(__magic_name__ , __magic_name__ ):
UpperCamelCase :Optional[Any] = f"""Input value of [number={number}] must be an integer"""
raise TypeError(__magic_name__ )
if number < 1:
UpperCamelCase :Union[str, Any] = f"""Input value of [number={number}] must be > 0"""
raise ValueError(__magic_name__ )
elif number == 1:
return 3
elif number == 2:
return 5
else:
UpperCamelCase :Optional[Any] = int(math.log(number // 3 , 2 ) ) + 2
UpperCamelCase :Optional[Any] = [3, 5]
UpperCamelCase :Union[str, Any] = 2
UpperCamelCase :Optional[Any] = 3
for block in range(1 , __magic_name__ ):
for _ in range(__magic_name__ ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
UpperCAmelCase_ : Optional[int] = 0
try:
UpperCAmelCase_ : int = proth(number)
except ValueError:
print(F'''ValueError: there is no {number}th Proth number''')
continue
print(F'''The {number}th Proth number: {value}''')
| 62 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : Union[str, Any] = """char"""
snake_case__ : Optional[int] = """bpe"""
snake_case__ : Dict = """wp"""
UpperCAmelCase_ : List[Any] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : List[Any] = ["""image_processor""", """char_tokenizer"""]
snake_case__ : Dict = """ViTImageProcessor"""
snake_case__ : List[str] = """MgpstrTokenizer"""
def __init__( self : Optional[int] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Dict=None , **__lowerCamelCase : Any ):
UpperCamelCase :Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __lowerCamelCase , )
UpperCamelCase :Optional[int] = kwargs.pop("""feature_extractor""" )
UpperCamelCase :List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
UpperCamelCase :Optional[int] = tokenizer
UpperCamelCase :int = AutoTokenizer.from_pretrained("""gpt2""" )
UpperCamelCase :int = AutoTokenizer.from_pretrained("""bert-base-uncased""" )
super().__init__(__lowerCamelCase , __lowerCamelCase )
def __call__( self : str , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Dict=None , __lowerCamelCase : str=None , **__lowerCamelCase : Dict ):
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
UpperCamelCase :Tuple = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
if text is not None:
UpperCamelCase :Any = self.char_tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
UpperCamelCase :Dict = encodings["""input_ids"""]
return inputs
def _A ( self : Tuple , __lowerCamelCase : str ):
UpperCamelCase , UpperCamelCase , UpperCamelCase :int = sequences
UpperCamelCase :Tuple = char_preds.size(0 )
UpperCamelCase , UpperCamelCase :str = self._decode_helper(__lowerCamelCase , """char""" )
UpperCamelCase , UpperCamelCase :List[Any] = self._decode_helper(__lowerCamelCase , """bpe""" )
UpperCamelCase , UpperCamelCase :List[Any] = self._decode_helper(__lowerCamelCase , """wp""" )
UpperCamelCase :Any = []
UpperCamelCase :str = []
for i in range(__lowerCamelCase ):
UpperCamelCase :Union[str, Any] = [char_scores[i], bpe_scores[i], wp_scores[i]]
UpperCamelCase :Any = [char_strs[i], bpe_strs[i], wp_strs[i]]
UpperCamelCase :str = scores.index(max(__lowerCamelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
UpperCamelCase :Optional[Any] = {}
UpperCamelCase :Dict = final_strs
UpperCamelCase :Union[str, Any] = final_scores
UpperCamelCase :List[str] = char_strs
UpperCamelCase :Tuple = bpe_strs
UpperCamelCase :Optional[Any] = wp_strs
return out
def _A ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ):
if format == DecodeType.CHARACTER:
UpperCamelCase :List[str] = self.char_decode
UpperCamelCase :Union[str, Any] = 1
UpperCamelCase :Optional[Any] = """[s]"""
elif format == DecodeType.BPE:
UpperCamelCase :Union[str, Any] = self.bpe_decode
UpperCamelCase :str = 2
UpperCamelCase :int = """#"""
elif format == DecodeType.WORDPIECE:
UpperCamelCase :int = self.wp_decode
UpperCamelCase :Any = 102
UpperCamelCase :int = """[SEP]"""
else:
raise ValueError(F"""Format {format} is not supported.""" )
UpperCamelCase , UpperCamelCase :int = [], []
UpperCamelCase :Any = pred_logits.size(0 )
UpperCamelCase :List[Any] = pred_logits.size(1 )
UpperCamelCase , UpperCamelCase :Optional[int] = pred_logits.topk(1 , dim=-1 , largest=__lowerCamelCase , sorted=__lowerCamelCase )
UpperCamelCase :Optional[Any] = preds_index.view(-1 , __lowerCamelCase )[:, 1:]
UpperCamelCase :int = decoder(__lowerCamelCase )
UpperCamelCase , UpperCamelCase :Optional[int] = torch.nn.functional.softmax(__lowerCamelCase , dim=2 ).max(dim=2 )
UpperCamelCase :Tuple = preds_max_prob[:, 1:]
for index in range(__lowerCamelCase ):
UpperCamelCase :Tuple = preds_str[index].find(__lowerCamelCase )
UpperCamelCase :List[Any] = preds_str[index][:pred_eos]
UpperCamelCase :List[Any] = preds_index[index].cpu().tolist()
UpperCamelCase :Optional[Any] = pred_index.index(__lowerCamelCase ) if eos_token in pred_index else -1
UpperCamelCase :List[str] = preds_max_prob[index][: pred_eos_index + 1]
UpperCamelCase :List[str] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__lowerCamelCase )
conf_scores.append(__lowerCamelCase )
return dec_strs, conf_scores
def _A ( self : Optional[Any] , __lowerCamelCase : str ):
UpperCamelCase :Dict = [seq.replace(""" """ , """""" ) for seq in self.char_tokenizer.batch_decode(__lowerCamelCase )]
return decode_strs
def _A ( self : Union[str, Any] , __lowerCamelCase : str ):
return self.bpe_tokenizer.batch_decode(__lowerCamelCase )
def _A ( self : int , __lowerCamelCase : Optional[int] ):
UpperCamelCase :Any = [seq.replace(""" """ , """""" ) for seq in self.wp_tokenizer.batch_decode(__lowerCamelCase )]
return decode_strs
| 62 | 1 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def a__ ( UpperCAmelCase : int , UpperCAmelCase : int ) -> Dict:
UpperCAmelCase : Any = checkpoint
UpperCAmelCase : List[str] = {}
UpperCAmelCase : List[str] = vae_state_dict['encoder.conv_in.weight']
UpperCAmelCase : Dict = vae_state_dict['encoder.conv_in.bias']
UpperCAmelCase : Optional[Any] = vae_state_dict['encoder.conv_out.weight']
UpperCAmelCase : Optional[int] = vae_state_dict['encoder.conv_out.bias']
UpperCAmelCase : Optional[int] = vae_state_dict['encoder.norm_out.weight']
UpperCAmelCase : Optional[int] = vae_state_dict['encoder.norm_out.bias']
UpperCAmelCase : Dict = vae_state_dict['decoder.conv_in.weight']
UpperCAmelCase : List[str] = vae_state_dict['decoder.conv_in.bias']
UpperCAmelCase : Tuple = vae_state_dict['decoder.conv_out.weight']
UpperCAmelCase : List[str] = vae_state_dict['decoder.conv_out.bias']
UpperCAmelCase : Dict = vae_state_dict['decoder.norm_out.weight']
UpperCAmelCase : Union[str, Any] = vae_state_dict['decoder.norm_out.bias']
UpperCAmelCase : Any = vae_state_dict['quant_conv.weight']
UpperCAmelCase : Union[str, Any] = vae_state_dict['quant_conv.bias']
UpperCAmelCase : Tuple = vae_state_dict['post_quant_conv.weight']
UpperCAmelCase : List[Any] = vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
UpperCAmelCase : str = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
UpperCAmelCase : int = {
layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(_lowerCAmelCase )
}
# Retrieves the keys for the decoder up blocks only
UpperCAmelCase : Dict = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
UpperCAmelCase : Dict = {
layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(_lowerCAmelCase )
}
for i in range(_lowerCAmelCase ):
UpperCAmelCase : List[str] = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
UpperCAmelCase : Any = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
UpperCAmelCase : Any = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
UpperCAmelCase : Optional[int] = renew_vae_resnet_paths(_lowerCAmelCase )
UpperCAmelCase : Any = {'old': f'''down.{i}.block''', 'new': f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , additional_replacements=[meta_path] , config=_lowerCAmelCase )
UpperCAmelCase : List[Any] = [key for key in vae_state_dict if 'encoder.mid.block' in key]
UpperCAmelCase : str = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase : int = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
UpperCAmelCase : Optional[Any] = renew_vae_resnet_paths(_lowerCAmelCase )
UpperCAmelCase : Dict = {'old': f'''mid.block_{i}''', 'new': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , additional_replacements=[meta_path] , config=_lowerCAmelCase )
UpperCAmelCase : Optional[Any] = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
UpperCAmelCase : int = renew_vae_attention_paths(_lowerCAmelCase )
UpperCAmelCase : Optional[Any] = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , additional_replacements=[meta_path] , config=_lowerCAmelCase )
conv_attn_to_linear(_lowerCAmelCase )
for i in range(_lowerCAmelCase ):
UpperCAmelCase : Optional[Any] = num_up_blocks - 1 - i
UpperCAmelCase : Any = [
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
UpperCAmelCase : Optional[int] = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
UpperCAmelCase : Optional[Any] = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
UpperCAmelCase : Tuple = renew_vae_resnet_paths(_lowerCAmelCase )
UpperCAmelCase : Any = {'old': f'''up.{block_id}.block''', 'new': f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , additional_replacements=[meta_path] , config=_lowerCAmelCase )
UpperCAmelCase : Optional[Any] = [key for key in vae_state_dict if 'decoder.mid.block' in key]
UpperCAmelCase : Union[str, Any] = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase : List[str] = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
UpperCAmelCase : str = renew_vae_resnet_paths(_lowerCAmelCase )
UpperCAmelCase : Any = {'old': f'''mid.block_{i}''', 'new': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , additional_replacements=[meta_path] , config=_lowerCAmelCase )
UpperCAmelCase : Any = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
UpperCAmelCase : List[str] = renew_vae_attention_paths(_lowerCAmelCase )
UpperCAmelCase : Optional[Any] = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , additional_replacements=[meta_path] , config=_lowerCAmelCase )
conv_attn_to_linear(_lowerCAmelCase )
return new_checkpoint
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , ) -> List[Any]:
# Only support V1
UpperCAmelCase : Any = requests.get(
''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
UpperCAmelCase : str = io.BytesIO(r.content )
UpperCAmelCase : Any = OmegaConf.load(_lowerCAmelCase )
UpperCAmelCase : List[Any] = 512
UpperCAmelCase : Dict = 'cuda' if torch.cuda.is_available() else 'cpu'
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
UpperCAmelCase : Optional[Any] = {}
with safe_open(_lowerCAmelCase , framework='''pt''' , device='''cpu''' ) as f:
for key in f.keys():
UpperCAmelCase : int = f.get_tensor(_lowerCAmelCase )
else:
UpperCAmelCase : int = torch.load(_lowerCAmelCase , map_location=_lowerCAmelCase )['state_dict']
# Convert the VAE model.
UpperCAmelCase : int = create_vae_diffusers_config(_lowerCAmelCase , image_size=_lowerCAmelCase )
UpperCAmelCase : int = custom_convert_ldm_vae_checkpoint(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : Any = AutoencoderKL(**_lowerCAmelCase )
vae.load_state_dict(_lowerCAmelCase )
vae.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
_lowerCamelCase : List[str] = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 336 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def a_ ( _lowerCAmelCase ) -> str:
__lowerCamelCase ,__lowerCamelCase : List[Any] = image.size
__lowerCamelCase ,__lowerCamelCase : List[str] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__lowerCamelCase : Optional[Any] = image.resize((w, h) ,resample=PIL_INTERPOLATION['lanczos'] )
__lowerCamelCase : List[Any] = np.array(_lowerCAmelCase ).astype(np.floataa ) / 255.0
__lowerCamelCase : Optional[Any] = image[None].transpose(0 ,3 ,1 ,2 )
__lowerCamelCase : int = torch.from_numpy(_lowerCAmelCase )
return 2.0 * image - 1.0
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self : str , _a : VQModel , _a : UNetaDModel , _a : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ) -> Optional[Any]:
super().__init__()
self.register_modules(vqvae=_a , unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self : List[Any] , _a : Union[torch.Tensor, PIL.Image.Image] = None , _a : Optional[int] = 1 , _a : Optional[int] = 100 , _a : Optional[float] = 0.0 , _a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _a : Optional[str] = "pil" , _a : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(_a , PIL.Image.Image ):
__lowerCamelCase : Any = 1
elif isinstance(_a , torch.Tensor ):
__lowerCamelCase : Any = image.shape[0]
else:
raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}' )
if isinstance(_a , PIL.Image.Image ):
__lowerCamelCase : List[str] = preprocess(_a )
__lowerCamelCase ,__lowerCamelCase : List[str] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__lowerCamelCase : Union[str, Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
__lowerCamelCase : Tuple = next(self.unet.parameters() ).dtype
__lowerCamelCase : Optional[int] = randn_tensor(_a , generator=_a , device=self.device , dtype=_a )
__lowerCamelCase : Optional[int] = image.to(device=self.device , dtype=_a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_a , device=self.device )
__lowerCamelCase : Union[str, Any] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__lowerCamelCase : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowerCamelCase : int = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowerCamelCase : List[str] = {}
if accepts_eta:
__lowerCamelCase : str = eta
for t in self.progress_bar(_a ):
# concat latents and low resolution image in the channel dimension.
__lowerCamelCase : str = torch.cat([latents, image] , dim=1 )
__lowerCamelCase : Union[str, Any] = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__lowerCamelCase : Optional[int] = self.unet(_a , _a ).sample
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase : List[Any] = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# decode the image latents with the VQVAE
__lowerCamelCase : Union[str, Any] = self.vqvae.decode(_a ).sample
__lowerCamelCase : Union[str, Any] = torch.clamp(_a , -1.0 , 1.0 )
__lowerCamelCase : List[str] = image / 2 + 0.5
__lowerCamelCase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase : Tuple = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
| 208 | 0 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
a_ = logging.get_logger(__name__)
def __lowercase ( lowerCamelCase : Tuple , lowerCamelCase : Any ):
UpperCamelCase_ : Union[str, Any] = set()
UpperCamelCase_ : List[Any] = []
def parse_line(lowerCamelCase : int ):
for line in fp:
if isinstance(lowerCamelCase , lowerCamelCase ):
UpperCamelCase_ : Dict = line.decode('UTF-8' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
if len(lowerCamelCase ) > 0:
UpperCamelCase_ : List[str] = '\n'.join(lowerCamelCase )
# Only keep the warnings specified in `targets`
if any(F": {x}: " in warning for x in targets ):
selected_warnings.add(lowerCamelCase )
buffer.clear()
continue
else:
UpperCamelCase_ : Union[str, Any] = line.strip()
buffer.append(lowerCamelCase )
if from_gh:
for filename in os.listdir(lowerCamelCase ):
UpperCamelCase_ : Union[str, Any] = os.path.join(lowerCamelCase , lowerCamelCase )
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
else:
try:
with zipfile.ZipFile(lowerCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
except Exception:
logger.warning(
F"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." )
return selected_warnings
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple ):
UpperCamelCase_ : Optional[int] = set()
UpperCamelCase_ : Optional[int] = [os.path.join(lowerCamelCase , lowerCamelCase ) for p in os.listdir(lowerCamelCase ) if (p.endswith('.zip' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(lowerCamelCase , lowerCamelCase ) )
return selected_warnings
if __name__ == "__main__":
def __lowercase ( lowerCamelCase : Optional[Any] ):
return values.split(',' )
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
a_ = parser.parse_args()
a_ = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
a_ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
a_ = extract_warnings(args.output_dir, args.targets)
a_ = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 50 | import math
import flax.linen as nn
import jax.numpy as jnp
def __lowercase ( lowerCamelCase : jnp.ndarray , lowerCamelCase : int , lowerCamelCase : float = 1 , lowerCamelCase : float = 1 , lowerCamelCase : float = 1.0e4 , lowerCamelCase : bool = False , lowerCamelCase : float = 1.0 , ):
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"Embedding dimension {embedding_dim} should be even"
UpperCamelCase_ : Dict = float(embedding_dim // 2 )
UpperCamelCase_ : Dict = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
UpperCamelCase_ : Optional[int] = min_timescale * jnp.exp(jnp.arange(lowerCamelCase , dtype=jnp.floataa ) * -log_timescale_increment )
UpperCamelCase_ : int = jnp.expand_dims(lowerCamelCase , 1 ) * jnp.expand_dims(lowerCamelCase , 0 )
# scale embeddings
UpperCamelCase_ : Tuple = scale * emb
if flip_sin_to_cos:
UpperCamelCase_ : Tuple = jnp.concatenate([jnp.cos(lowerCamelCase ), jnp.sin(lowerCamelCase )] , axis=1 )
else:
UpperCamelCase_ : Optional[int] = jnp.concatenate([jnp.sin(lowerCamelCase ), jnp.cos(lowerCamelCase )] , axis=1 )
UpperCamelCase_ : Optional[Any] = jnp.reshape(lowerCamelCase , [jnp.shape(lowerCamelCase )[0], embedding_dim] )
return signal
class _lowercase ( nn.Module ):
lowercase = 3_2
lowercase = jnp.floataa
@nn.compact
def __call__( self : str , snake_case : Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : str = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_1' )(snake_case )
UpperCamelCase_ : int = nn.silu(snake_case )
UpperCamelCase_ : Optional[Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_2' )(snake_case )
return temb
class _lowercase ( nn.Module ):
lowercase = 3_2
lowercase = False
lowercase = 1
@nn.compact
def __call__( self : int , snake_case : Any ) -> str:
"""simple docstring"""
return get_sinusoidal_embeddings(
snake_case , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 50 | 1 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
A_ : int
A_ : jnp.dtype = jnp.floataa
def a (self : str ):
"""simple docstring"""
__snake_case = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__(self : Dict , a__ : Any ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = hidden_states.shape
__snake_case = jax.image.resize(
a__ , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , )
__snake_case = self.conv(a__ )
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
A_ : int
A_ : jnp.dtype = jnp.floataa
def a (self : Dict ):
"""simple docstring"""
__snake_case = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__(self : Dict , a__ : List[Any] ):
"""simple docstring"""
__snake_case = self.conv(a__ )
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
A_ : int
A_ : int = None
A_ : float = 0.0
A_ : bool = None
A_ : jnp.dtype = jnp.floataa
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.in_channels if self.out_channels is None else self.out_channels
__snake_case = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__snake_case = nn.Conv(
a__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__snake_case = nn.Dense(a__ , dtype=self.dtype )
__snake_case = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__snake_case = nn.Dropout(self.dropout_prob )
__snake_case = nn.Conv(
a__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__snake_case = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__snake_case = None
if use_nin_shortcut:
__snake_case = nn.Conv(
a__ , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , )
def __call__(self : Optional[Any] , a__ : int , a__ : List[Any] , a__ : str=True ):
"""simple docstring"""
__snake_case = hidden_states
__snake_case = self.norma(a__ )
__snake_case = nn.swish(a__ )
__snake_case = self.conva(a__ )
__snake_case = self.time_emb_proj(nn.swish(a__ ) )
__snake_case = jnp.expand_dims(jnp.expand_dims(a__ , 1 ) , 1 )
__snake_case = hidden_states + temb
__snake_case = self.norma(a__ )
__snake_case = nn.swish(a__ )
__snake_case = self.dropout(a__ , a__ )
__snake_case = self.conva(a__ )
if self.conv_shortcut is not None:
__snake_case = self.conv_shortcut(a__ )
return hidden_states + residual
| 24 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=512, lowerCamelCase__=16, lowerCamelCase__=2, lowerCamelCase__=0.02, lowerCamelCase__=4, ):
A : List[str] = parent
A : Optional[int] = batch_size
A : Union[str, Any] = seq_length
A : Any = is_training
A : List[str] = use_attention_mask
A : Union[str, Any] = use_token_type_ids
A : Any = use_labels
A : str = vocab_size
A : Union[str, Any] = hidden_size
A : str = num_hidden_layers
A : List[Any] = num_attention_heads
A : Optional[int] = intermediate_size
A : Optional[Any] = hidden_act
A : Dict = hidden_dropout_prob
A : List[Any] = attention_probs_dropout_prob
A : Optional[int] = max_position_embeddings
A : int = type_vocab_size
A : str = type_sequence_label_size
A : List[Any] = initializer_range
A : str = num_choices
def _lowerCAmelCase ( self ):
A : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : Union[str, Any] = None
if self.use_attention_mask:
A : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
A : int = None
if self.use_token_type_ids:
A : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
A : Optional[int] = AlbertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def _lowerCAmelCase ( self ):
A : Dict = self.prepare_config_and_inputs()
A , A , A , A : str = config_and_inputs
A : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Any = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCAmelCase ( self ):
A : Dict = FlaxAlbertModelTester(self )
@slow
def _lowerCAmelCase ( self ):
for model_class_name in self.all_model_classes:
A : Dict = model_class_name.from_pretrained("""albert-base-v2""" )
A : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Dict = FlaxAlbertModel.from_pretrained("""albert-base-v2""" )
A : List[str] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
A : str = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
A : Optional[int] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ )[0]
A : str = (1, 11, 768)
self.assertEqual(output.shape, lowerCamelCase__ )
A : Optional[int] = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], lowerCamelCase__, atol=1e-4 ) )
| 116 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Dict = logging.get_logger(__name__)
a : List[Any] = {
"""microsoft/unispeech-sat-base-100h-libri-ft""": (
"""https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"""
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "unispeech-sat"
def __init__( self , snake_case__=32 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=1e-5 , snake_case__="group" , snake_case__="gelu" , snake_case__=(512, 512, 512, 512, 512, 512, 512) , snake_case__=(5, 2, 2, 2, 2, 2, 2) , snake_case__=(10, 3, 3, 3, 3, 2, 2) , snake_case__=False , snake_case__=128 , snake_case__=16 , snake_case__=False , snake_case__=True , snake_case__=0.05 , snake_case__=10 , snake_case__=2 , snake_case__=0.0 , snake_case__=10 , snake_case__=0 , snake_case__=320 , snake_case__=2 , snake_case__=0.1 , snake_case__=100 , snake_case__=256 , snake_case__=256 , snake_case__=0.1 , snake_case__="mean" , snake_case__=False , snake_case__=False , snake_case__=256 , snake_case__=(512, 512, 512, 512, 1500) , snake_case__=(5, 3, 3, 1, 1) , snake_case__=(1, 2, 3, 1, 1) , snake_case__=512 , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=504 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ )
lowercase__ : Union[str, Any]= hidden_size
lowercase__ : int= feat_extract_norm
lowercase__ : str= feat_extract_activation
lowercase__ : Any= list(snake_case__ )
lowercase__ : List[str]= list(snake_case__ )
lowercase__ : int= list(snake_case__ )
lowercase__ : Optional[int]= conv_bias
lowercase__ : List[Any]= num_conv_pos_embeddings
lowercase__ : str= num_conv_pos_embedding_groups
lowercase__ : int= len(self.conv_dim )
lowercase__ : Dict= num_hidden_layers
lowercase__ : Tuple= intermediate_size
lowercase__ : Union[str, Any]= hidden_act
lowercase__ : Dict= num_attention_heads
lowercase__ : Union[str, Any]= hidden_dropout
lowercase__ : Union[str, Any]= attention_dropout
lowercase__ : int= activation_dropout
lowercase__ : str= feat_proj_dropout
lowercase__ : List[Any]= final_dropout
lowercase__ : int= layerdrop
lowercase__ : Union[str, Any]= layer_norm_eps
lowercase__ : int= initializer_range
lowercase__ : Tuple= vocab_size
lowercase__ : Optional[int]= num_clusters
lowercase__ : str= do_stable_layer_norm
lowercase__ : str= use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ : List[Any]= apply_spec_augment
lowercase__ : Optional[int]= mask_time_prob
lowercase__ : List[Any]= mask_time_length
lowercase__ : int= mask_time_min_masks
lowercase__ : Union[str, Any]= mask_feature_prob
lowercase__ : List[str]= mask_feature_length
lowercase__ : Dict= mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase__ : Any= num_codevectors_per_group
lowercase__ : str= num_codevector_groups
lowercase__ : Optional[Any]= contrastive_logits_temperature
lowercase__ : int= feat_quantizer_dropout
lowercase__ : Union[str, Any]= num_negatives
lowercase__ : int= codevector_dim
lowercase__ : Union[str, Any]= proj_codevector_dim
lowercase__ : Union[str, Any]= diversity_loss_weight
# ctc loss
lowercase__ : str= ctc_loss_reduction
lowercase__ : str= ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase__ : Tuple= classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase__ : Tuple= list(snake_case__ )
lowercase__ : Optional[int]= list(snake_case__ )
lowercase__ : Dict= list(snake_case__ )
lowercase__ : int= xvector_output_dim
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 150 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def lowercase__(A="ro" , A="en" , A="wmt16" , A=None ) ->None:
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("run pip install datasets" )
lowercase__ : int= f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
lowercase__ : List[Any]= datasets.load_dataset(A , A )
if save_dir is None:
lowercase__ : Union[str, Any]= f'''{dataset}-{pair}'''
lowercase__ : str= Path(A )
save_dir.mkdir(exist_ok=A )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
lowercase__ : Any= "val" if split == "validation" else split
lowercase__ : List[Any]= save_dir.joinpath(f'''{fn}.source''' )
lowercase__ : Optional[Any]= save_dir.joinpath(f'''{fn}.target''' )
lowercase__ : Optional[int]= src_path.open("w+" )
lowercase__ : Any= tgt_path.open("w+" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
lowercase__ : int= x["translation"]
src_fp.write(ex[src_lang] + "\n" )
tgt_fp.write(ex[tgt_lang] + "\n" )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 150 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = StableDiffusionSAGPipeline
lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
lowerCamelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCamelCase_ = CLIPTextModel(lowercase )
lowerCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=0 ) -> List[str]:
if str(lowercase ).startswith("mps" ):
lowerCamelCase_ = torch.manual_seed(lowercase )
else:
lowerCamelCase_ = torch.Generator(device=lowercase ).manual_seed(lowercase )
lowerCamelCase_ = {
"prompt": ".",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 1.0,
"sag_scale": 1.0,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
lowerCamelCase_ = sag_pipe.to(lowercase )
sag_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = "."
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = sag_pipe(
[prompt] , generator=lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
lowerCamelCase_ = sag_pipe.to(lowercase )
sag_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = "."
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = sag_pipe(
[prompt] , generator=lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
lowerCamelCase_ = sag_pipe.to(lowercase )
sag_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = "."
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = sag_pipe(
[prompt] , width=768 , height=512 , generator=lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , )
lowerCamelCase_ = output.images
assert image.shape == (1, 512, 768, 3)
| 19 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[Any]:
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = 1
lowerCamelCase_ = FrozenDict(lowercase )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = True
lowerCamelCase_ = FrozenDict(lowercase )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=lowercase , segmentation_processor=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
self.enable_attention_slicing(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , lowercase , lowercase , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> int:
lowerCamelCase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowerCamelCase_ = self.segmentation_model(**lowercase )
lowerCamelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowerCamelCase_ = self.numpy_to_pil(lowercase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowerCamelCase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowercase , image=lowercase , mask_image=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , )
| 19 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class a ( _a ):
__lowerCAmelCase : Optional[Any] = """sew"""
def __init__( self :Any ,__lowercase :Union[str, Any]=3_2 ,__lowercase :Dict=7_6_8 ,__lowercase :Tuple=1_2 ,__lowercase :Optional[int]=1_2 ,__lowercase :List[str]=3_0_7_2 ,__lowercase :Any=2 ,__lowercase :Optional[int]="gelu" ,__lowercase :List[Any]=0.1 ,__lowercase :Optional[Any]=0.1 ,__lowercase :Tuple=0.1 ,__lowercase :Tuple=0.0 ,__lowercase :Dict=0.1 ,__lowercase :Tuple=0.1 ,__lowercase :List[Any]=0.02 ,__lowercase :int=1e-5 ,__lowercase :Tuple="group" ,__lowercase :Optional[int]="gelu" ,__lowercase :int=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) ,__lowercase :Dict=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,__lowercase :str=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,__lowercase :List[Any]=False ,__lowercase :Optional[int]=1_2_8 ,__lowercase :List[str]=1_6 ,__lowercase :Optional[Any]=True ,__lowercase :Tuple=0.05 ,__lowercase :List[str]=1_0 ,__lowercase :str=2 ,__lowercase :Any=0.0 ,__lowercase :Dict=1_0 ,__lowercase :List[Any]=0 ,__lowercase :Tuple="mean" ,__lowercase :str=False ,__lowercase :List[str]=False ,__lowercase :List[str]=2_5_6 ,__lowercase :int=0 ,__lowercase :Tuple=1 ,__lowercase :Union[str, Any]=2 ,**__lowercase :Optional[int] ,):
super().__init__(**__lowerCAmelCase ,pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase )
snake_case__ : List[Any] = hidden_size
snake_case__ : List[Any] = feat_extract_norm
snake_case__ : Union[str, Any] = feat_extract_activation
snake_case__ : Any = list(__lowerCAmelCase )
snake_case__ : int = list(__lowerCAmelCase )
snake_case__ : Optional[Any] = list(__lowerCAmelCase )
snake_case__ : str = conv_bias
snake_case__ : List[Any] = num_conv_pos_embeddings
snake_case__ : Tuple = num_conv_pos_embedding_groups
snake_case__ : List[str] = len(self.conv_dim )
snake_case__ : Optional[Any] = num_hidden_layers
snake_case__ : Tuple = intermediate_size
snake_case__ : Tuple = squeeze_factor
snake_case__ : Dict = hidden_act
snake_case__ : Dict = num_attention_heads
snake_case__ : Optional[Any] = hidden_dropout
snake_case__ : List[Any] = attention_dropout
snake_case__ : Dict = activation_dropout
snake_case__ : Optional[int] = feat_proj_dropout
snake_case__ : List[str] = final_dropout
snake_case__ : Dict = layerdrop
snake_case__ : Optional[int] = layer_norm_eps
snake_case__ : str = initializer_range
snake_case__ : Dict = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case__ : Tuple = apply_spec_augment
snake_case__ : Any = mask_time_prob
snake_case__ : int = mask_time_length
snake_case__ : Tuple = mask_time_min_masks
snake_case__ : Tuple = mask_feature_prob
snake_case__ : Optional[Any] = mask_feature_length
snake_case__ : str = mask_feature_min_masks
# ctc loss
snake_case__ : int = ctc_loss_reduction
snake_case__ : Tuple = ctc_zero_infinity
# sequence classification
snake_case__ : Optional[int] = use_weighted_layer_sum
snake_case__ : Optional[Any] = classifier_proj_size
@property
def __lowerCamelCase ( self :int ):
return functools.reduce(operator.mul ,self.conv_stride ,1 )
| 370 |
def _lowerCAmelCase ( __lowerCAmelCase = 50 ) -> int:
"""simple docstring"""
snake_case__ : Optional[int] = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 44 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def a__ ( lowerCAmelCase ) -> None:
create_state_space_tree(lowerCAmelCase , [] , 0 )
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> None:
if index == len(lowerCAmelCase ):
print(lowerCAmelCase )
return
create_state_space_tree(lowerCAmelCase , lowerCAmelCase , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(lowerCAmelCase , lowerCAmelCase , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
_A = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
| 171 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
UpperCAmelCase__ : str = {}
def _a (self , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = super().add_tokens(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
if num_added_tokens == 0:
raise ValueError(
F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
""" `placeholder_token` that is not already in the tokenizer.""" )
def _a (self , _lowerCamelCase , *_lowerCamelCase , _lowerCamelCase=1 , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : int = []
if num_vec_per_token == 1:
self.try_adding_tokens(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
output.append(_lowerCamelCase )
else:
UpperCAmelCase__ : Any = []
for i in range(_lowerCamelCase ):
UpperCAmelCase__ : Optional[int] = placeholder_token + F"""_{i}"""
self.try_adding_tokens(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
output.append(_lowerCamelCase )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F"""The tokenizer already has placeholder token {token} that can get confused with"""
F""" {placeholder_token}keep placeholder tokens independent""" )
UpperCAmelCase__ : Dict = output
def _a (self , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=1.0 ):
"""simple docstring"""
if isinstance(_lowerCamelCase , _lowerCamelCase ):
UpperCAmelCase__ : str = []
for i in range(len(_lowerCamelCase ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=_lowerCamelCase ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
UpperCAmelCase__ : List[str] = self.token_map[placeholder_token]
UpperCAmelCase__ : Any = tokens[: 1 + int(len(_lowerCamelCase ) * prop_tokens_to_load )]
if vector_shuffle:
UpperCAmelCase__ : Any = copy.copy(_lowerCamelCase )
random.shuffle(_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = text.replace(_lowerCamelCase , """ """.join(_lowerCamelCase ) )
return text
def __call__(self , _lowerCamelCase , *_lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=1.0 , **_lowerCamelCase ):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
_lowerCamelCase , vector_shuffle=_lowerCamelCase , prop_tokens_to_load=_lowerCamelCase ) , *_lowerCamelCase , **_lowerCamelCase , )
def _a (self , _lowerCamelCase , *_lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=1.0 , **_lowerCamelCase ):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
_lowerCamelCase , vector_shuffle=_lowerCamelCase , prop_tokens_to_load=_lowerCamelCase ) , *_lowerCamelCase , **_lowerCamelCase , )
| 171 | 1 |
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , split=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , streaming=__UpperCAmelCase , num_proc=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = field
__UpperCamelCase = path_or_paths if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else {self.split: path_or_paths}
__UpperCamelCase = Json(
cache_dir=__UpperCAmelCase , data_files=__UpperCAmelCase , features=__UpperCAmelCase , field=__UpperCAmelCase , **__UpperCAmelCase , )
def UpperCAmelCase ( self ):
'''simple docstring'''
if self.streaming:
__UpperCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
self.builder.download_and_prepare(
download_config=__UpperCAmelCase , download_mode=__UpperCAmelCase , verification_mode=__UpperCAmelCase , base_path=__UpperCAmelCase , num_proc=self.num_proc , )
__UpperCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=__UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
__UpperCamelCase = dataset
__UpperCamelCase = path_or_buf
__UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__UpperCamelCase = num_proc
__UpperCamelCase = 'utf-8'
__UpperCamelCase = to_json_kwargs
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.to_json_kwargs.pop('path_or_buf' , __UpperCAmelCase )
__UpperCamelCase = self.to_json_kwargs.pop('orient' , 'records' )
__UpperCamelCase = self.to_json_kwargs.pop('lines' , True if orient == 'records' else False )
__UpperCamelCase = self.to_json_kwargs.pop('index' , False if orient in ['split', 'table'] else True )
__UpperCamelCase = self.to_json_kwargs.pop('compression' , __UpperCAmelCase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , 'wb' , compression=__UpperCAmelCase ) as buffer:
__UpperCamelCase = self._write(file_obj=__UpperCAmelCase , orient=__UpperCAmelCase , lines=__UpperCAmelCase , index=__UpperCAmelCase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
' was passed. Please provide a local path instead.' )
__UpperCamelCase = self._write(
file_obj=self.path_or_buf , orient=__UpperCAmelCase , lines=__UpperCAmelCase , index=__UpperCAmelCase , **self.to_json_kwargs )
return written
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = args
__UpperCamelCase = query_table(
table=self.dataset.data , key=slice(__UpperCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
__UpperCamelCase = batch.to_pandas().to_json(
path_or_buf=__UpperCAmelCase , orient=__UpperCAmelCase , lines=__UpperCAmelCase , index=__UpperCAmelCase , **__UpperCAmelCase )
if not json_str.endswith('\n' ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
__UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(__UpperCAmelCase )
else:
__UpperCamelCase , __UpperCamelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , __UpperCAmelCase , __UpperCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
written += file_obj.write(__UpperCAmelCase )
return written
| 263 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(__UpperCAmelCase , 'num_attention_heads' ) )
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=16 , __UpperCAmelCase=[128, 256, 384] , __UpperCAmelCase=[4, 6, 8] , __UpperCAmelCase=[2, 3, 4] , __UpperCAmelCase=[16, 16, 16] , __UpperCAmelCase=0 , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = num_channels
__UpperCamelCase = kernel_size
__UpperCamelCase = stride
__UpperCamelCase = padding
__UpperCamelCase = hidden_sizes
__UpperCamelCase = num_attention_heads
__UpperCamelCase = depths
__UpperCamelCase = key_dim
__UpperCamelCase = drop_path_rate
__UpperCamelCase = patch_size
__UpperCamelCase = attention_ratio
__UpperCamelCase = mlp_ratio
__UpperCamelCase = initializer_range
__UpperCamelCase = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = num_labels
__UpperCamelCase = initializer_range
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = LevitModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = (self.image_size, self.image_size)
__UpperCamelCase , __UpperCamelCase = image_size[0], image_size[1]
for _ in range(4 ):
__UpperCamelCase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
__UpperCamelCase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = LevitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowercase = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = LevitModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not output attentions' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__UpperCamelCase = outputs.hidden_states
__UpperCamelCase = len(self.model_tester.depths ) + 1
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
__UpperCamelCase = (self.model_tester.image_size, self.model_tester.image_size)
__UpperCamelCase , __UpperCamelCase = image_size[0], image_size[1]
for _ in range(4 ):
__UpperCamelCase = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
__UpperCamelCase = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
'''simple docstring'''
__UpperCamelCase = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__UpperCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCamelCase = False
__UpperCamelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__UpperCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
__UpperCamelCase = problem_type['title']
__UpperCamelCase = problem_type['num_labels']
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if problem_type["num_labels"] > 1:
__UpperCamelCase = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
__UpperCamelCase = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__UpperCAmelCase ) as warning_list:
__UpperCamelCase = model(**__UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = LevitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A ( ) -> Union[str, Any]:
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
# verify the logits
__UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 263 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __A ( a_ :Tuple) -> List[Any]:
__a : Union[str, Any] = 3_84
if "tiny" in model_name:
__a : str = [3, 3, 9, 3]
__a : Union[str, Any] = [96, 1_92, 3_84, 7_68]
if "small" in model_name:
__a : Tuple = [3, 3, 27, 3]
__a : List[str] = [96, 1_92, 3_84, 7_68]
if "base" in model_name:
__a : Tuple = [3, 3, 27, 3]
__a : Tuple = [1_28, 2_56, 5_12, 10_24]
__a : str = 5_12
if "large" in model_name:
__a : Optional[int] = [3, 3, 27, 3]
__a : Optional[int] = [1_92, 3_84, 7_68, 15_36]
__a : Optional[Any] = 7_68
if "xlarge" in model_name:
__a : str = [3, 3, 27, 3]
__a : Optional[Any] = [2_56, 5_12, 10_24, 20_48]
__a : Any = 10_24
# set label information
__a : Dict = 1_50
__a : Union[str, Any] = "huggingface/label-files"
__a : Optional[int] = "ade20k-id2label.json"
__a : str = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type='''dataset''') , '''r'''))
__a : Dict = {int(lowerCamelCase__): v for k, v in idalabel.items()}
__a : Optional[Any] = {v: k for k, v in idalabel.items()}
__a : Optional[int] = ConvNextConfig(
depths=lowerCamelCase__ , hidden_sizes=lowerCamelCase__ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''])
__a : Any = UperNetConfig(
backbone_config=lowerCamelCase__ , auxiliary_in_channels=lowerCamelCase__ , num_labels=lowerCamelCase__ , idalabel=lowerCamelCase__ , labelaid=lowerCamelCase__ , )
return config
def __A ( a_ :str) -> str:
__a : List[str] = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight'''))
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias'''))
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight'''))
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias'''))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"""))
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"""))
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight"""))
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias"""))
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight"""))
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias"""))
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight"""))
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias"""))
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
])
# fmt: on
return rename_keys
def __A ( a_ :Union[str, Any] , a_ :List[str] , a_ :Any) -> Dict:
__a : List[str] = dct.pop(lowerCamelCase__)
__a : Union[str, Any] = val
def __A ( a_ :List[Any] , a_ :List[str] , a_ :Union[str, Any]) -> Dict:
__a : Union[str, Any] = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
__a : Optional[int] = model_name_to_url[model_name]
__a : Optional[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='''cpu''')["state_dict"]
__a : List[Any] = get_upernet_config(lowerCamelCase__)
__a : Tuple = UperNetForSemanticSegmentation(lowerCamelCase__)
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__a : Optional[Any] = state_dict.pop(lowerCamelCase__)
if "bn" in key:
__a : str = key.replace('''bn''' , '''batch_norm''')
__a : Union[str, Any] = val
# rename keys
__a : Tuple = create_rename_keys(lowerCamelCase__)
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__)
model.load_state_dict(lowerCamelCase__)
# verify on image
__a : List[str] = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
__a : Union[str, Any] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__).raw).convert('''RGB''')
__a : List[str] = SegformerImageProcessor()
__a : int = processor(lowerCamelCase__ , return_tensors='''pt''').pixel_values
with torch.no_grad():
__a : Tuple = model(lowerCamelCase__)
if model_name == "upernet-convnext-tiny":
__a : List[Any] = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]])
elif model_name == "upernet-convnext-small":
__a : Dict = torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]])
elif model_name == "upernet-convnext-base":
__a : Tuple = torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]])
elif model_name == "upernet-convnext-large":
__a : Dict = torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]])
elif model_name == "upernet-convnext-xlarge":
__a : List[Any] = torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]])
print('''Logits:''' , outputs.logits[0, 0, :3, :3])
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase__ , atol=1e-4)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""")
model.save_pretrained(lowerCamelCase__)
print(F"""Saving processor to {pytorch_dump_folder_path}""")
processor.save_pretrained(lowerCamelCase__)
if push_to_hub:
print(F"""Pushing model and processor for {model_name} to hub""")
model.push_to_hub(F"""openmmlab/{model_name}""")
processor.push_to_hub(F"""openmmlab/{model_name}""")
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
A = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 160 |
"""simple docstring"""
import math
def _snake_case ( lowerCamelCase__ : list , lowerCamelCase__ : int ) -> int:
lowerCamelCase_ : int =len(lowerCamelCase__ )
lowerCamelCase_ : List[Any] =int(math.floor(math.sqrt(lowerCamelCase__ ) ) )
lowerCamelCase_ : List[Any] =0
while arr[min(lowerCamelCase__ , lowerCamelCase__ ) - 1] < x:
lowerCamelCase_ : str =step
step += int(math.floor(math.sqrt(lowerCamelCase__ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
lowerCamelCase_ : Dict =prev + 1
if prev == min(lowerCamelCase__ , lowerCamelCase__ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
A__ : List[Any] = input('Enter numbers separated by a comma:\n').strip()
A__ : Optional[Any] = [int(item) for item in user_input.split(',')]
A__ : List[str] = int(input('Enter the number to be searched:\n'))
A__ : Any = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(f'Number {x} is at index {res}')
| 144 | 0 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
lowercase : List[str] = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
lowercase : Union[str, Any] = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
lowercase : Tuple = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
lowercase : Any = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
lowercase : List[Any] = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string')),
'references': datasets.Value('string'),
}) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
def __lowercase ( self , lowercase , lowercase , lowercase=[1, 10, 100] , lowercase=4 , lowercase=3.0) -> Union[str, Any]:
'''simple docstring'''
if os.getenv('HF_ALLOW_CODE_EVAL' , 0) != "1":
raise ValueError(_WARNING)
if os.name == "nt":
raise NotImplementedError('This metric is currently not supported on Windows.')
with ThreadPoolExecutor(max_workers=lowercase) as executor:
a__ : List[str] = []
a__ : List[Any] = Counter()
a__ : Tuple = 0
a__ : Optional[int] = defaultdict(lowercase)
for task_id, (candidates, test_case) in enumerate(zip(lowercase , lowercase)):
for candidate in candidates:
a__ : List[Any] = candidate + '\n' + test_case
a__ : Optional[int] = (test_program, timeout, task_id, completion_id[task_id])
a__ : Tuple = executor.submit(lowercase , *lowercase)
futures.append(lowercase)
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(lowercase):
a__ : Optional[Any] = future.result()
results[result["task_id"]].append((result['completion_id'], result))
a__ : Dict = [], []
for result in results.values():
result.sort()
a__ : Dict = [r[1]['passed'] for r in result]
total.append(len(lowercase))
correct.append(sum(lowercase))
a__ : Optional[int] = np.array(lowercase)
a__ : Tuple = np.array(lowercase)
a__ : Optional[int] = k
a__ : int = {F'pass@{k}': estimate_pass_at_k(lowercase , lowercase , lowercase).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def A_ ( A__ , A__ , A__ ) -> Optional[int]:
def estimator(A__ , A__ , A__ ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(lowercase__ , lowercase__ ):
a__ : List[str] = itertools.repeat(lowercase__ , len(lowercase__ ) )
else:
assert len(lowercase__ ) == len(lowercase__ )
a__ : str = iter(lowercase__ )
return np.array([estimator(int(lowercase__ ) , int(lowercase__ ) , lowercase__ ) for n, c in zip(lowercase__ , lowercase__ )] )
| 362 |
def A_ ( A__ ) -> int:
if not isinstance(A__ , A__ ):
raise TypeError('only integers accepted as input' )
else:
a__ : List[Any] = str(abs(A__ ) )
a__ : Optional[int] = [list(A__ ) for char in range(len(A__ ) )]
for index in range(len(A__ ) ):
num_transpositions[index].pop(A__ )
return max(
int(''.join(list(A__ ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 225 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class _a ( unittest.TestCase):
"""simple docstring"""
def __init__( self : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : Dict=3 , __UpperCamelCase : Any=3_0 , __UpperCamelCase : str=4_0_0 , __UpperCamelCase : int=True , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : List[str]=[0.5, 0.5, 0.5] , __UpperCamelCase : Tuple=[0.5, 0.5, 0.5] , __UpperCamelCase : int=True , __UpperCamelCase : Dict=1 / 2_5_5 , __UpperCamelCase : Optional[int]=True , )->Any:
_UpperCAmelCase = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_pad
def lowercase__ ( self : Dict )->List[str]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase__ ( self : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str=False )->Union[str, Any]:
if not batched:
_UpperCAmelCase = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
_UpperCAmelCase = image.size
else:
_UpperCAmelCase = image.shape[1], image.shape[2]
if w < h:
_UpperCAmelCase = int(self.size['''shortest_edge'''] * h / w )
_UpperCAmelCase = self.size["shortest_edge"]
elif w > h:
_UpperCAmelCase = self.size["shortest_edge"]
_UpperCAmelCase = int(self.size['''shortest_edge'''] * w / h )
else:
_UpperCAmelCase = self.size["shortest_edge"]
_UpperCAmelCase = self.size["shortest_edge"]
else:
_UpperCAmelCase = []
for image in image_inputs:
_UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_UpperCAmelCase = max(__lowerCamelCase , key=lambda __UpperCamelCase : item[0] )[0]
_UpperCAmelCase = max(__lowerCamelCase , key=lambda __UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( lowercase__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = ConditionalDetrImageProcessor if is_vision_available() else None
def lowercase__ ( self : Optional[Any] )->List[Any]:
_UpperCAmelCase = ConditionalDetrImageProcessingTester(self )
@property
def lowercase__ ( self : Dict )->List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict )->Tuple:
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
def lowercase__ ( self : Optional[Any] )->Optional[Any]:
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
_UpperCAmelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__lowerCamelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2, '''longest_edge''': 8_4} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def lowercase__ ( self : Tuple )->Tuple:
pass
def lowercase__ ( self : List[Any] )->Tuple:
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCAmelCase = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
_UpperCAmelCase = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase__ ( self : Optional[int] )->Dict:
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCAmelCase = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
_UpperCAmelCase = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase__ ( self : Tuple )->Tuple:
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCAmelCase = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
_UpperCAmelCase = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowercase__ ( self : Union[str, Any] )->Dict:
_UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
_UpperCAmelCase = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
_UpperCAmelCase = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCamelCase )
_UpperCAmelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
_UpperCAmelCase = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCamelCase ) )
# verify boxes
_UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCamelCase )
_UpperCAmelCase = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCamelCase ) )
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCamelCase ) )
# verify class_labels
_UpperCAmelCase = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCamelCase ) )
# verify orig_size
_UpperCAmelCase = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCamelCase ) )
# verify size
_UpperCAmelCase = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCamelCase ) )
@slow
def lowercase__ ( self : str )->Tuple:
_UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
_UpperCAmelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_UpperCAmelCase = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
_UpperCAmelCase = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCamelCase )
_UpperCAmelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
_UpperCAmelCase = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCamelCase ) )
# verify boxes
_UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCamelCase )
_UpperCAmelCase = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCamelCase ) )
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCamelCase ) )
# verify class_labels
_UpperCAmelCase = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCamelCase ) )
# verify masks
_UpperCAmelCase = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCamelCase )
# verify orig_size
_UpperCAmelCase = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCamelCase ) )
# verify size
_UpperCAmelCase = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCamelCase ) )
| 260 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
A : Optional[int] = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
A : List[Any] = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def lowercase_ ( ):
"""simple docstring"""
lowerCamelCase__ : Dict = calculate_rouge(_A , _A , bootstrap_aggregation=_A , rouge_keys=["rouge2", "rougeL"] )
assert isinstance(_A , _A )
lowerCamelCase__ : List[Any] = calculate_rouge(_A , _A , bootstrap_aggregation=_A , rouge_keys=["rouge2"] )
assert (
pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean()
)
def lowercase_ ( ):
"""simple docstring"""
lowerCamelCase__ : Any = "rougeLsum"
lowerCamelCase__ : List[str] = calculate_rouge(_A , _A , newline_sep=_A , rouge_keys=[k] )[k]
lowerCamelCase__ : str = calculate_rouge(_A , _A , newline_sep=_A , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowercase_ ( ):
"""simple docstring"""
lowerCamelCase__ : int = ["rouge1", "rouge2", "rougeL"]
lowerCamelCase__ : Union[str, Any] = calculate_rouge(_A , _A , newline_sep=_A , rouge_keys=_A )
lowerCamelCase__ : Any = calculate_rouge(_A , _A , newline_sep=_A , rouge_keys=_A )
assert score_sep == score_no_sep
def lowercase_ ( ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = [
"Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
]
lowerCamelCase__ : Tuple = [
"Margot Frank, died in 1945, a month earlier than previously thought.",
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
" the final seconds on board Flight 9525.",
]
assert calculate_rouge(_A , _A , newline_sep=_A ) == calculate_rouge(_A , _A , newline_sep=_A )
def lowercase_ ( ):
"""simple docstring"""
lowerCamelCase__ : List[str] = [
"\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
]
lowerCamelCase__ : str = [
" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
]
lowerCamelCase__ : Union[str, Any] = calculate_rouge(_A , _A , rouge_keys=["rougeLsum"] , newline_sep=_A )["rougeLsum"]
lowerCamelCase__ : List[str] = calculate_rouge(_A , _A , rouge_keys=["rougeLsum"] )["rougeLsum"]
assert new_score > prev_score
def lowercase_ ( ):
"""simple docstring"""
lowerCamelCase__ : Tuple = Path("examples/seq2seq/test_data/wmt_en_ro" )
lowerCamelCase__ : Any = calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) )
assert isinstance(_A , _A )
lowerCamelCase__ : str = calculate_rouge_path(
data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=_A )
assert isinstance(_A , _A )
| 184 | 0 |
from math import ceil, sqrt
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int = 1_0_0_0_0_0_0 ):
'''simple docstring'''
__snake_case : List[str] = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
__snake_case : str = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
__snake_case : Dict = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(F'''{solution() = }''')
| 356 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : Optional[int] = "microsoft/speecht5_tts"
A : List[Any] = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
A : str = "text_reader"
A : Optional[Any] = SpeechTaProcessor
A : Any = SpeechTaForTextToSpeech
A : Optional[Any] = SpeechTaHifiGan
A : str = ["text"]
A : Union[str, Any] = ["audio"]
def snake_case__ ( self : List[Any] ):
if self.post_processor is None:
__snake_case : Tuple = """microsoft/speecht5_hifigan"""
super().setup()
def snake_case__ ( self : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple=None ):
__snake_case : str = self.pre_processor(text=_lowerCAmelCase , return_tensors="""pt""" , truncation=_lowerCAmelCase )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
__snake_case : List[Any] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
__snake_case : str = torch.tensor(embeddings_dataset[73_05]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def snake_case__ ( self : List[Any] , _lowerCAmelCase : Dict ):
with torch.no_grad():
return self.model.generate_speech(**_lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : int ):
with torch.no_grad():
return self.post_processor(_lowerCAmelCase ).cpu().detach()
| 20 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> None:
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.', lowerCAmelCase__, )
super().__init__(*lowerCAmelCase__, **lowerCAmelCase__)
| 69 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE , os.listdir(_SCREAMING_SNAKE_CASE )[0] , 'snapshots' ) )]
_UpperCAmelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 4
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3
assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1
_UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_SCREAMING_SNAKE_CASE ) == num_samples
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = FlaxDDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = scheduler.create_state()
_UpperCAmelCase = scheduler_state
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , use_memory_efficient_attention=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 329 | 0 |
import argparse
import os
import re
lowerCamelCase__ : str = 'src/transformers'
# Pattern that looks at the indentation in a line.
lowerCamelCase__ : Tuple = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCamelCase__ : str = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCamelCase__ : int = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCamelCase__ : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCamelCase__ : int = re.compile(r'\[([^\]]+)\]')
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE_ = _re_indent.search(__UpperCAmelCase )
return "" if search is None else search.groups()[0]
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : Optional[int]="" , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Dict=None ) -> Dict:
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(__UpperCAmelCase ):
index += 1
SCREAMING_SNAKE_CASE_ = ['\n'.join(lines[:index] )]
else:
SCREAMING_SNAKE_CASE_ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
SCREAMING_SNAKE_CASE_ = [lines[index]]
index += 1
while index < len(__UpperCAmelCase ) and (end_prompt is None or not lines[index].startswith(__UpperCAmelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__UpperCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(__UpperCAmelCase ) )
if index < len(__UpperCAmelCase ) - 1:
SCREAMING_SNAKE_CASE_ = [lines[index + 1]]
index += 1
else:
SCREAMING_SNAKE_CASE_ = []
else:
blocks.append('\n'.join(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE_ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__UpperCAmelCase ) > 0:
blocks.append('\n'.join(__UpperCAmelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__UpperCAmelCase ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> Any:
def _inner(__UpperCAmelCase : Tuple ):
return key(__UpperCAmelCase ).lower().replace('_' , '' )
return _inner
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any]=None ) -> Dict:
# If no key is provided, we use a noop.
def noop(__UpperCAmelCase : Dict ):
return x
if key is None:
SCREAMING_SNAKE_CASE_ = noop
# Constants are all uppercase, they go first.
SCREAMING_SNAKE_CASE_ = [obj for obj in objects if key(__UpperCAmelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
SCREAMING_SNAKE_CASE_ = [obj for obj in objects if key(__UpperCAmelCase )[0].isupper() and not key(__UpperCAmelCase ).isupper()]
# Functions begin with a lowercase, they go last.
SCREAMING_SNAKE_CASE_ = [obj for obj in objects if not key(__UpperCAmelCase )[0].isupper()]
SCREAMING_SNAKE_CASE_ = ignore_underscore(__UpperCAmelCase )
return sorted(__UpperCAmelCase , key=__UpperCAmelCase ) + sorted(__UpperCAmelCase , key=__UpperCAmelCase ) + sorted(__UpperCAmelCase , key=__UpperCAmelCase )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> List[Any]:
# This inner function sort imports between [ ].
def _replace(__UpperCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = match.groups()[0]
if "," not in imports:
return f"[{imports}]"
SCREAMING_SNAKE_CASE_ = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
SCREAMING_SNAKE_CASE_ = keys[:-1]
return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(__UpperCAmelCase )] ) + "]"
SCREAMING_SNAKE_CASE_ = import_statement.split('\n' )
if len(__UpperCAmelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
SCREAMING_SNAKE_CASE_ = 2 if lines[1].strip() == '[' else 1
SCREAMING_SNAKE_CASE_ = [(i, _re_strip_line.search(__UpperCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
SCREAMING_SNAKE_CASE_ = sort_objects(__UpperCAmelCase , key=lambda __UpperCAmelCase : x[1] )
SCREAMING_SNAKE_CASE_ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__UpperCAmelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
SCREAMING_SNAKE_CASE_ = _re_bracket_content.sub(_replace , lines[1] )
else:
SCREAMING_SNAKE_CASE_ = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
SCREAMING_SNAKE_CASE_ = keys[:-1]
SCREAMING_SNAKE_CASE_ = get_indent(lines[1] ) + ', '.join([f"\"{k}\"" for k in sort_objects(__UpperCAmelCase )] )
return "\n".join(__UpperCAmelCase )
else:
# Finally we have to deal with imports fitting on one line
SCREAMING_SNAKE_CASE_ = _re_bracket_content.sub(_replace , __UpperCAmelCase )
return import_statement
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : Any=True ) -> Any:
with open(__UpperCAmelCase , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
SCREAMING_SNAKE_CASE_ = split_code_in_indented_blocks(
__UpperCAmelCase , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(__UpperCAmelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
SCREAMING_SNAKE_CASE_ = main_blocks[block_idx]
SCREAMING_SNAKE_CASE_ = block.split('\n' )
# Get to the start of the imports.
SCREAMING_SNAKE_CASE_ = 0
while line_idx < len(__UpperCAmelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
SCREAMING_SNAKE_CASE_ = len(__UpperCAmelCase )
else:
line_idx += 1
if line_idx >= len(__UpperCAmelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
SCREAMING_SNAKE_CASE_ = '\n'.join(block_lines[line_idx:-1] )
SCREAMING_SNAKE_CASE_ = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
SCREAMING_SNAKE_CASE_ = split_code_in_indented_blocks(__UpperCAmelCase , indent_level=__UpperCAmelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
SCREAMING_SNAKE_CASE_ = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
SCREAMING_SNAKE_CASE_ = [(pattern.search(__UpperCAmelCase ).groups()[0] if pattern.search(__UpperCAmelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
SCREAMING_SNAKE_CASE_ = [(i, key) for i, key in enumerate(__UpperCAmelCase ) if key is not None]
SCREAMING_SNAKE_CASE_ = [x[0] for x in sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = []
for i in range(len(__UpperCAmelCase ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
SCREAMING_SNAKE_CASE_ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(__UpperCAmelCase )
count += 1
# And we put our main block back together with its first and last line.
SCREAMING_SNAKE_CASE_ = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(__UpperCAmelCase ):
if check_only:
return True
else:
print(f"Overwriting {file}." )
with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(__UpperCAmelCase ) )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any]=True ) -> List[str]:
SCREAMING_SNAKE_CASE_ = []
for root, _, files in os.walk(__UpperCAmelCase ):
if "__init__.py" in files:
SCREAMING_SNAKE_CASE_ = sort_imports(os.path.join(__UpperCAmelCase , '__init__.py' ) , check_only=__UpperCAmelCase )
if result:
SCREAMING_SNAKE_CASE_ = [os.path.join(__UpperCAmelCase , '__init__.py' )]
if len(__UpperCAmelCase ) > 0:
raise ValueError(f"Would overwrite {len(__UpperCAmelCase )} files, run `make style`." )
if __name__ == "__main__":
lowerCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
lowerCamelCase__ : Optional[Any] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only) | 352 |
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
return int((input_a, input_a).count(0 ) != 0 )
def UpperCAmelCase_ ( ) -> None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1)) | 210 | 0 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase = FlaxAutoencoderKL
@property
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = 4
_lowerCAmelCase = 3
_lowerCAmelCase = (32, 32)
_lowerCAmelCase = jax.random.PRNGKey(0 )
_lowerCAmelCase = jax.random.uniform(_snake_case , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
_lowerCAmelCase = self.dummy_input
return init_dict, inputs_dict
| 82 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
_lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith("""module.encoder""" ):
_lowerCAmelCase = key.replace("""module.encoder""" , """glpn.encoder""" )
if key.startswith("""module.decoder""" ):
_lowerCAmelCase = key.replace("""module.decoder""" , """decoder.stages""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_lowerCAmelCase = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
_lowerCAmelCase = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(snake_case )-1}' )
if "norm" in key:
_lowerCAmelCase = key.replace("""norm""" , """layer_norm""" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_lowerCAmelCase = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
_lowerCAmelCase = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(snake_case )-1}' )
if "layer_norm1" in key:
_lowerCAmelCase = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
_lowerCAmelCase = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
_lowerCAmelCase = key[key.find("""block""" ) + len("""block""" )]
_lowerCAmelCase = key.replace(F'block{idx}' , F'block.{int(snake_case )-1}' )
if "attn.q" in key:
_lowerCAmelCase = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
_lowerCAmelCase = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
_lowerCAmelCase = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
_lowerCAmelCase = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
_lowerCAmelCase = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
_lowerCAmelCase = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
_lowerCAmelCase = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
_lowerCAmelCase = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_lowerCAmelCase = key[key.find("""linear_c""" ) + len("""linear_c""" )]
_lowerCAmelCase = key.replace(F'linear_c{idx}' , F'linear_c.{int(snake_case )-1}' )
if "bot_conv" in key:
_lowerCAmelCase = key.replace("""bot_conv""" , """0.convolution""" )
if "skip_conv1" in key:
_lowerCAmelCase = key.replace("""skip_conv1""" , """1.convolution""" )
if "skip_conv2" in key:
_lowerCAmelCase = key.replace("""skip_conv2""" , """2.convolution""" )
if "fusion1" in key:
_lowerCAmelCase = key.replace("""fusion1""" , """1.fusion""" )
if "fusion2" in key:
_lowerCAmelCase = key.replace("""fusion2""" , """2.fusion""" )
if "fusion3" in key:
_lowerCAmelCase = key.replace("""fusion3""" , """3.fusion""" )
if "fusion" in key and "conv" in key:
_lowerCAmelCase = key.replace("""conv""" , """convolutional_layer""" )
if key.startswith("""module.last_layer_depth""" ):
_lowerCAmelCase = key.replace("""module.last_layer_depth""" , """head.head""" )
_lowerCAmelCase = value
return new_state_dict
def _UpperCAmelCase ( snake_case , snake_case ):
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_lowerCAmelCase = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
_lowerCAmelCase = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
_lowerCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
_lowerCAmelCase = kv_bias[: config.hidden_sizes[i]]
_lowerCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
_lowerCAmelCase = kv_bias[config.hidden_sizes[i] :]
def _UpperCAmelCase ( ):
"""simple docstring"""
_lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_lowerCAmelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return image
@torch.no_grad()
def _UpperCAmelCase ( snake_case , snake_case , snake_case=False , snake_case=None ):
"""simple docstring"""
_lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
_lowerCAmelCase = GLPNImageProcessor()
# prepare image
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=snake_case , return_tensors="""pt""" ).pixel_values
logger.info("""Converting model...""" )
# load original state dict
_lowerCAmelCase = torch.load(snake_case , map_location=torch.device("""cpu""" ) )
# rename keys
_lowerCAmelCase = rename_keys(snake_case )
# key and value matrices need special treatment
read_in_k_v(snake_case , snake_case )
# create HuggingFace model and load state dict
_lowerCAmelCase = GLPNForDepthEstimation(snake_case )
model.load_state_dict(snake_case )
model.eval()
# forward pass
_lowerCAmelCase = model(snake_case )
_lowerCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
_lowerCAmelCase = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
_lowerCAmelCase = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(F'Unknown model name: {model_name}' )
_lowerCAmelCase = torch.Size([1, 4_80, 6_40] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , snake_case , atol=1E-4 )
print("""Looks ok!""" )
# finally, push to hub if required
if push_to_hub:
logger.info("""Pushing model and image processor to the hub...""" )
model.push_to_hub(
repo_path_or_name=Path(snake_case , snake_case ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=snake_case , )
image_processor.push_to_hub(
repo_path_or_name=Path(snake_case , snake_case ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=snake_case , )
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
A__ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 82 | 1 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {"""vocab_file""": """vocab.json"""}
UpperCamelCase__ = {
"""vocab_file""": {
"""mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""",
}
}
UpperCamelCase__ = {"""mgp-str""": 27}
class a__ ( snake_case__ ):
_a : int = VOCAB_FILES_NAMES
_a : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_a : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _A , _A="[GO]" , _A="[GO]" , _A="[s]" , _A="[GO]" , **_A ):
"""simple docstring"""
super().__init__(
unk_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , **_A , )
with open(_A , encoding="utf-8" ) as vocab_handle:
__lowerCAmelCase = json.load(_A )
__lowerCAmelCase = {v: k for k, v in self.vocab.items()}
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return len(self.vocab )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = []
for s in text:
char_tokens.extend(_A )
return char_tokens
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
return self.vocab.get(_A , self.vocab.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
return self.decoder.get(_A )
def __SCREAMING_SNAKE_CASE( self , _A , _A = None ):
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error("Vocabulary path ({}) should be a directory".format(_A ) )
return
__lowerCAmelCase = os.path.join(
_A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(_A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=_A , ensure_ascii=_A ) + "\n" )
return (vocab_file,)
| 102 |
from pathlib import Path
import fire
from tqdm import tqdm
def _a ( SCREAMING_SNAKE_CASE_ : Dict="ro" , SCREAMING_SNAKE_CASE_ : Union[str, Any]="en" , SCREAMING_SNAKE_CASE_ : Optional[Any]="wmt16" , SCREAMING_SNAKE_CASE_ : List[str]=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("run pip install datasets" )
__lowerCAmelCase = F"""{src_lang}-{tgt_lang}"""
print(F"""Converting {dataset}-{pair}""" )
__lowerCAmelCase = datasets.load_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if save_dir is None:
__lowerCAmelCase = F"""{dataset}-{pair}"""
__lowerCAmelCase = Path(SCREAMING_SNAKE_CASE_ )
save_dir.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
for split in ds.keys():
print(F"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
__lowerCAmelCase = "val" if split == "validation" else split
__lowerCAmelCase = save_dir.joinpath(F"""{fn}.source""" )
__lowerCAmelCase = save_dir.joinpath(F"""{fn}.target""" )
__lowerCAmelCase = src_path.open("w+" )
__lowerCAmelCase = tgt_path.open("w+" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__lowerCAmelCase = x["translation"]
src_fp.write(ex[src_lang] + "\n" )
tgt_fp.write(ex[tgt_lang] + "\n" )
print(F"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 102 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
snake_case_ = None
snake_case_ = logging.get_logger(__name__)
snake_case_ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
snake_case_ = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
snake_case_ = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
snake_case_ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : Dict = VOCAB_FILES_NAMES
A_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : str = PRETRAINED_VOCAB_FILES_MAP
A_ : int = ['input_ids', 'attention_mask']
A_ : Tuple = MBartTokenizer
A_ : List[int] = []
A_ : List[int] = []
def __init__(self : Optional[int] , a__ : str=None , a__ : int=None , a__ : Any="<s>" , a__ : Any="</s>" , a__ : Any="</s>" , a__ : List[str]="<s>" , a__ : List[str]="<unk>" , a__ : Dict="<pad>" , a__ : List[Any]="<mask>" , a__ : Union[str, Any]=None , a__ : Any=None , a__ : Union[str, Any]=None , **a__ : int , ):
"""simple docstring"""
__snake_case = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
super().__init__(
vocab_file=a__ , tokenizer_file=a__ , bos_token=a__ , eos_token=a__ , sep_token=a__ , cls_token=a__ , unk_token=a__ , pad_token=a__ , mask_token=a__ , src_lang=a__ , tgt_lang=a__ , additional_special_tokens=a__ , **a__ , )
__snake_case = vocab_file
__snake_case = False if not self.vocab_file else True
__snake_case = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__snake_case = {
lang_code: self.convert_tokens_to_ids(a__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__snake_case = src_lang if src_lang is not None else '''en_XX'''
__snake_case = self.convert_tokens_to_ids(self._src_lang )
__snake_case = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a (self : Optional[int] ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def a (self : Tuple , a__ : str ):
"""simple docstring"""
__snake_case = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a (self : Tuple , a__ : List[int] , a__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a (self : List[Any] , a__ : List[int] , a__ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a (self : List[Any] , a__ : Optional[Any] , a__ : str , a__ : Optional[str] , a__ : Optional[str] , **a__ : int ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__snake_case = src_lang
__snake_case = self(a__ , add_special_tokens=a__ , return_tensors=a__ , **a__ )
__snake_case = self.convert_tokens_to_ids(a__ )
__snake_case = tgt_lang_id
return inputs
def a (self : Tuple , a__ : List[str] , a__ : str = "en_XX" , a__ : Optional[List[str]] = None , a__ : str = "ro_RO" , **a__ : Tuple , ):
"""simple docstring"""
__snake_case = src_lang
__snake_case = tgt_lang
return super().prepare_seqaseq_batch(a__ , a__ , **a__ )
def a (self : str ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def a (self : List[Any] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a (self : Optional[int] , a__ : str ):
"""simple docstring"""
__snake_case = self.convert_tokens_to_ids(a__ )
__snake_case = []
__snake_case = [self.eos_token_id, self.cur_lang_code]
__snake_case = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a (self : Union[str, Any] , a__ : str ):
"""simple docstring"""
__snake_case = self.convert_tokens_to_ids(a__ )
__snake_case = []
__snake_case = [self.eos_token_id, self.cur_lang_code]
__snake_case = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a (self : int , a__ : str , a__ : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
__snake_case = os.path.join(
a__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ):
copyfile(self.vocab_file , a__ )
return (out_vocab_file,)
| 24 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= vocab_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= intermediate_size
__lowercase= hidden_act
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= scope
__lowercase= self.vocab_size - 1
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__lowercase= ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= OpenAIGPTModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , head_mask=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= OpenAIGPTLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= OpenAIGPTDoubleHeadsModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= OpenAIGPTForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
(
(
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
),
)= config_and_inputs
__lowercase= {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : Optional[Any] =(
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
UpperCamelCase_ : Tuple =(
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
UpperCamelCase_ : List[str] =(
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__lowercase= torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase , )
__lowercase= inputs_dict['labels']
__lowercase= inputs_dict['labels']
__lowercase= torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase , )
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
return inputs_dict
def _A (self ):
__lowercase= OpenAIGPTModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , n_embd=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase )
@slow
def _A (self ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= OpenAIGPTModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=lowerCAmelCase ) # the president is
__lowercase= [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase )
| 295 | 0 |
"""simple docstring"""
import math
def snake_case_ ( A_ : int ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
_lowerCamelCase : str = range(3, int(math.sqrt(A_ ) + 1 ), 2 )
return not any(not number % i for i in odd_numbers )
def snake_case_ ( A_ : Any, A_ : Tuple=1, **A_ : Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Any = factor * value
_lowerCamelCase : List[str] = value
while not is_prime(A_ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1, **A_ )
return value
| 175 |
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
lowerCAmelCase__ = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def snake_case_ ( A_ : int, A_ : Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
_lowerCamelCase : Union[str, Any] = int(re.match(R'''.*layer_(\d*).*''', A_ )[1] )
layer_number -= 3
return F'''h.{layer_number}.''' + key
def snake_case_ ( A_ : List[Any] ):
'''simple docstring'''
if dtype == torch.bool:
return 1 / 8
_lowerCamelCase : List[str] = re.search(R'''[^\d](\d+)$''', str(A_ ) )
if bit_search is None:
raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''' )
_lowerCamelCase : Optional[Any] = int(bit_search.groups()[0] )
return bit_size // 8
def snake_case_ ( A_ : str, A_ : Any, A_ : int, A_ : List[str], A_ : Any ):
'''simple docstring'''
if bloom_config_file == "":
_lowerCamelCase : Dict = BloomConfig()
else:
_lowerCamelCase : Any = BloomConfig.from_json_file(A_ )
if shard_model:
_lowerCamelCase : Optional[int] = os.listdir(A_ )
_lowerCamelCase : List[str] = sorted(filter(lambda A_ : s.startswith('''layer''' ) and "model_00" in s, A_ ) )
_lowerCamelCase : str = {'''weight_map''': {}, '''metadata''': {}}
_lowerCamelCase : List[str] = 0
_lowerCamelCase : str = None
_lowerCamelCase : str = BloomConfig()
for j, file in enumerate(A_ ):
print('''Processing file: {}'''.format(A_ ) )
_lowerCamelCase : List[Any] = None
for i in range(A_ ):
# load all TP files
_lowerCamelCase : Any = file.replace('''model_00''', F'''model_0{i}''' )
_lowerCamelCase : Any = torch.load(os.path.join(A_, A_ ), map_location='''cpu''' )
# Rename keys in the transformers names
_lowerCamelCase : Optional[Any] = list(temp.keys() )
for key in keys:
_lowerCamelCase : List[Any] = temp.pop(A_ )
if tensors is None:
_lowerCamelCase : Any = temp
else:
for key in tensors.keys():
if any(key.endswith(A_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
_lowerCamelCase : List[Any] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
_lowerCamelCase : Optional[Any] = torch.cat([tensors[key], temp[key]], dim=A_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(A_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
_lowerCamelCase : Optional[Any] = tensors[key] / pretraining_tp
torch.save(
A_, os.path.join(
A_, '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ), str(len(A_ ) ).zfill(5 ) ), ), )
for key in tensors.keys():
_lowerCamelCase : str = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
_lowerCamelCase : Tuple = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ), str(len(A_ ) ).zfill(5 ) )
_lowerCamelCase : List[Any] = BloomConfig()
_lowerCamelCase : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
_lowerCamelCase : Union[str, Any] = total_size
with open(A_, '''w''', encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(A_, WEIGHTS_NAME + '''.index.json''' ), '''w''', encoding='''utf-8''' ) as f:
_lowerCamelCase : Any = json.dumps(A_, indent=2, sort_keys=A_ ) + '''\n'''
f.write(A_ )
else:
_lowerCamelCase : Tuple = BloomModel(A_ )
_lowerCamelCase : Optional[int] = os.listdir(A_ )
_lowerCamelCase : Union[str, Any] = sorted(filter(lambda A_ : s.startswith('''layer''' ) and "model_00" in s, A_ ) )
_lowerCamelCase : int = None
for i, file in enumerate(A_ ):
_lowerCamelCase : Optional[int] = None
for i in range(A_ ):
# load all TP files
_lowerCamelCase : str = file.replace('''model_00''', F'''model_0{i}''' )
_lowerCamelCase : List[Any] = torch.load(os.path.join(A_, A_ ), map_location='''cpu''' )
# Rename keys in the transformers names
_lowerCamelCase : List[Any] = list(temp.keys() )
for key in keys:
_lowerCamelCase : Dict = temp.pop(A_ )
if tensors is None:
_lowerCamelCase : int = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(A_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
_lowerCamelCase : Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
_lowerCamelCase : Optional[Any] = torch.cat([tensors[key], temp[key]], dim=A_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(A_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
_lowerCamelCase : List[Any] = tensors[key] / pretraining_tp
_lowerCamelCase : List[str] = model.load_state_dict(A_, strict=A_ )
assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
_lowerCamelCase : Optional[Any] = set(other_keys.missing_keys )
else:
_lowerCamelCase : Union[str, Any] = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(A_, exist_ok=A_ )
_lowerCamelCase : Union[str, Any] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
_lowerCamelCase : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
_lowerCamelCase : Dict = model.to(config.torch_dtype )
torch.save(model.state_dict(), A_ )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(A_, '''w''', encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
lowerCAmelCase__ = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 175 | 1 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class a__ ( nn.Module ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = jnp.floataa
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowercase ) -> Any:
'''simple docstring'''
A__ , A__ , A__ , A__ = hidden_states.shape
A__ = jax.image.resize(
lowercase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
A__ = self.conv(lowercase )
return hidden_states
class a__ ( nn.Module ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = jnp.floataa
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
A__ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowercase ) -> List[str]:
'''simple docstring'''
A__ = self.conv(lowercase )
return hidden_states
class a__ ( nn.Module ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = None
__lowerCamelCase = 0.0
__lowerCamelCase = None
__lowerCamelCase = jnp.floataa
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = self.in_channels if self.out_channels is None else self.out_channels
A__ = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
A__ = nn.Conv(
lowercase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
A__ = nn.Dense(lowercase , dtype=self.dtype )
A__ = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
A__ = nn.Dropout(self.dropout_prob )
A__ = nn.Conv(
lowercase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
A__ = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
A__ = None
if use_nin_shortcut:
A__ = nn.Conv(
lowercase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self , lowercase , lowercase , lowercase=True ) -> Optional[Any]:
'''simple docstring'''
A__ = hidden_states
A__ = self.norma(lowercase )
A__ = nn.swish(lowercase )
A__ = self.conva(lowercase )
A__ = self.time_emb_proj(nn.swish(lowercase ) )
A__ = jnp.expand_dims(jnp.expand_dims(lowercase , 1 ) , 1 )
A__ = hidden_states + temb
A__ = self.norma(lowercase )
A__ = nn.swish(lowercase )
A__ = self.dropout(lowercase , lowercase )
A__ = self.conva(lowercase )
if self.conv_shortcut is not None:
A__ = self.conv_shortcut(lowercase )
return hidden_states + residual
| 68 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class a__ :
"""simple docstring"""
__lowerCamelCase = BlenderbotSmallConfig
__lowerCamelCase = {}
__lowerCamelCase = 'gelu'
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = eos_token_id
A__ = pad_token_id
A__ = bos_token_id
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ = prepare_blenderbot_small_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def UpperCamelCase ( self , lowercase , lowercase ) -> str:
'''simple docstring'''
A__ = TFBlenderbotSmallModel(config=lowercase ).get_decoder()
A__ = inputs_dict["input_ids"]
A__ = input_ids[:1, :]
A__ = inputs_dict["attention_mask"][:1, :]
A__ = inputs_dict["head_mask"]
A__ = 1
# first forward pass
A__ = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase )
A__ , A__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
A__ = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ = model(lowercase , attention_mask=lowercase )[0]
A__ = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ = output_from_no_past[:, -3:, random_slice_idx]
A__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1e-3 )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Optional[Any]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , SCREAMING_SNAKE_CASE_: Dict=None , SCREAMING_SNAKE_CASE_: List[str]=None , ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
A__ = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class a__ ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__lowerCamelCase = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__lowerCamelCase = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCamelCase = True
__lowerCamelCase = False
__lowerCamelCase = False
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = TFBlenderbotSmallModelTester(self )
A__ = ConfigTester(self , config_class=lowercase )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
@require_tokenizers
@require_tf
class a__ ( unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
__lowerCamelCase = 'facebook/blenderbot_small-90M'
@cached_property
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = self.tokenizer(self.src_text , return_tensors="tf" )
A__ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , )
A__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 68 | 1 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
A : Tuple = logging.getLogger(__name__)
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = '''token-classification'''
def __init__(self : List[str] , _UpperCAmelCase : Any ) -> Tuple:
"""simple docstring"""
if type(_UpperCAmelCase ) == dict:
lowercase__ = Namespace(**_UpperCAmelCase )
lowercase__ = import_module("""tasks""" )
try:
lowercase__ = getattr(_UpperCAmelCase , hparams.task_type )
lowercase__ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
lowercase__ = self.token_classification_task.get_labels(hparams.labels )
lowercase__ = CrossEntropyLoss().ignore_index
super().__init__(_UpperCAmelCase , len(self.labels ) , self.mode )
def lowerCamelCase__ (self : Dict , **_UpperCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
return self.model(**_UpperCAmelCase )
def lowerCamelCase__ (self : Any , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase__ = self(**_UpperCAmelCase )
lowercase__ = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def lowerCamelCase__ (self : Tuple ) -> Dict:
"""simple docstring"""
lowercase__ = self.hparams
for mode in ["train", "dev", "test"]:
lowercase__ = self._feature_file(_UpperCAmelCase )
if os.path.exists(_UpperCAmelCase ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , _UpperCAmelCase )
lowercase__ = torch.load(_UpperCAmelCase )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
lowercase__ = self.token_classification_task.read_examples_from_file(args.data_dir , _UpperCAmelCase )
lowercase__ = self.token_classification_task.convert_examples_to_features(
_UpperCAmelCase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=_UpperCAmelCase , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , _UpperCAmelCase )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : bool = False ) -> DataLoader:
"""simple docstring"""
lowercase__ = self._feature_file(_UpperCAmelCase )
logger.info("""Loading features from cached file %s""" , _UpperCAmelCase )
lowercase__ = torch.load(_UpperCAmelCase )
lowercase__ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase__ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowercase__ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowercase__ = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
lowercase__ = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , batch_size=_UpperCAmelCase )
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> Any:
"""simple docstring"""
"""Compute validation""" ""
lowercase__ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase__ = self(**_UpperCAmelCase )
lowercase__ , lowercase__ = outputs[:2]
lowercase__ = logits.detach().cpu().numpy()
lowercase__ = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowerCamelCase__ (self : Any , _UpperCAmelCase : Any ) -> int:
"""simple docstring"""
lowercase__ = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
lowercase__ = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
lowercase__ = np.argmax(_UpperCAmelCase , axis=2 )
lowercase__ = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
lowercase__ = dict(enumerate(self.labels ) )
lowercase__ = [[] for _ in range(out_label_ids.shape[0] )]
lowercase__ = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowercase__ = {
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(_UpperCAmelCase , _UpperCAmelCase ),
"""precision""": precision_score(_UpperCAmelCase , _UpperCAmelCase ),
"""recall""": recall_score(_UpperCAmelCase , _UpperCAmelCase ),
"""f1""": fa_score(_UpperCAmelCase , _UpperCAmelCase ),
}
lowercase__ = dict(results.items() )
lowercase__ = results
return ret, preds_list, out_label_list
def lowerCamelCase__ (self : str , _UpperCAmelCase : List[str] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ = self._eval_end(_UpperCAmelCase )
lowercase__ = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ = self._eval_end(_UpperCAmelCase )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowercase__ = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def lowerCamelCase__ (_UpperCAmelCase : Any , _UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
BaseTransformer.add_model_specific_args(_UpperCAmelCase , _UpperCAmelCase )
parser.add_argument(
"""--task_type""" , default="""NER""" , type=_UpperCAmelCase , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=_UpperCAmelCase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--labels""" , default="""""" , type=_UpperCAmelCase , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=_UpperCAmelCase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
if __name__ == "__main__":
A : List[str] = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
A : str = NERTransformer.add_model_specific_args(parser, os.getcwd())
A : str = parser.parse_args()
A : Union[str, Any] = NERTransformer(args)
A : Optional[Any] = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
A : Dict = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
A : Union[str, Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 371 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = OpenAIGPTTokenizer
A__ = OpenAIGPTTokenizerFast
A__ = True
A__ = False
def lowerCamelCase__ (self : List[str] ) -> int:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
lowercase__ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
lowercase__ = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(_UpperCAmelCase ) )
def lowerCamelCase__ (self : Dict , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
return "lower newer", "lower newer"
def lowerCamelCase__ (self : Dict ) -> Tuple:
"""simple docstring"""
lowercase__ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowercase__ = """lower"""
lowercase__ = ["""low""", """er</w>"""]
lowercase__ = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__ = tokens + ["""<unk>"""]
lowercase__ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Union[str, Any]=15 ) -> Optional[int]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
# Simple input
lowercase__ = """This is a simple input"""
lowercase__ = ["""This is a simple input 1""", """This is a simple input 2"""]
lowercase__ = ("""This is a simple input""", """This is a pair""")
lowercase__ = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" , )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" , )
def lowerCamelCase__ (self : Tuple ) -> Any:
"""simple docstring"""
pass
@require_ftfy
@require_spacy
@require_tokenizers
class A ( UpperCAmelCase__ ):
'''simple docstring'''
pass
| 146 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = """▁"""
__lowerCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
__lowerCAmelCase = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
}
}
__lowerCAmelCase = {
"""facebook/mbart-large-en-ro""": 1_0_2_4,
"""facebook/mbart-large-cc25""": 1_0_2_4,
}
# fmt: off
__lowerCAmelCase = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES
__UpperCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = ['''input_ids''', '''attention_mask''']
__UpperCAmelCase : List[int] = []
__UpperCAmelCase : List[int] = []
def __init__( self : Any ,_a : Dict ,_a : Tuple="<s>" ,_a : Optional[Any]="</s>" ,_a : Optional[int]="</s>" ,_a : Optional[Any]="<s>" ,_a : Dict="<unk>" ,_a : List[str]="<pad>" ,_a : Optional[Any]="<mask>" ,_a : str=None ,_a : Union[str, Any]=None ,_a : Optional[int]=None ,_a : Optional[Dict[str, Any]] = None ,_a : List[str]=None ,**_a : Any ,):
'''simple docstring'''
_a : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
_a : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,tokenizer_file=_a ,src_lang=_a ,tgt_lang=_a ,additional_special_tokens=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
_a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
_a : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_a : Dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_a : Optional[int] = 1
_a : List[Any] = len(self.sp_model )
_a : List[str] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_a )
}
_a : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()}
_a : List[str] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_a : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_a : List[Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_a : List[Any] = src_lang if src_lang is not None else 'en_XX'
_a : Optional[int] = self.lang_code_to_id[self._src_lang]
_a : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : int ):
'''simple docstring'''
_a : int = self.__dict__.copy()
_a : Tuple = None
_a : List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : int ,_a : List[str] ):
'''simple docstring'''
_a : int = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_a : Dict = {}
_a : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __lowercase ( self : int ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __lowercase ( self : List[str] ,_a : str ):
'''simple docstring'''
_a : List[str] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowercase ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
_a : str = [1] * len(self.prefix_tokens )
_a : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_a )) + suffix_ones
return prefix_ones + ([0] * len(_a )) + ([0] * len(_a )) + suffix_ones
def __lowercase ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowercase ( self : List[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : Optional[int] = [self.sep_token_id]
_a : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowercase ( self : Union[str, Any] ,_a : Union[str, Any] ,_a : str ,_a : Optional[str] ,_a : Optional[str] ,**_a : Any ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_a : Dict = src_lang
_a : str = self(_a ,add_special_tokens=_a ,return_tensors=_a ,**_a )
_a : int = self.convert_tokens_to_ids(_a )
_a : Any = tgt_lang_id
return inputs
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Any = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowercase ( self : Any ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def __lowercase ( self : Tuple ,_a : Dict ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_a : str = self.sp_model.PieceToId(_a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowercase ( self : int ,_a : int ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowercase ( self : Optional[int] ,_a : str ):
'''simple docstring'''
_a : str = ''.join(_a ).replace(_a ,' ' ).strip()
return out_string
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : str = os.path.join(
_a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,'wb' ) as fi:
_a : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def __lowercase ( self : Union[str, Any] ,_a : List[str] ,_a : str = "en_XX" ,_a : Optional[List[str]] = None ,_a : str = "ro_RO" ,**_a : List[str] ,):
'''simple docstring'''
_a : Any = src_lang
_a : int = tgt_lang
return super().prepare_seqaseq_batch(_a ,_a ,**_a )
def __lowercase ( self : int ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowercase ( self : int ,_a : Optional[int] ):
'''simple docstring'''
_a : Any = self.lang_code_to_id[src_lang]
_a : List[Any] = []
_a : Any = [self.eos_token_id, self.cur_lang_code]
def __lowercase ( self : List[str] ,_a : str ):
'''simple docstring'''
_a : Any = self.lang_code_to_id[lang]
_a : Dict = []
_a : Optional[int] = [self.eos_token_id, self.cur_lang_code]
| 271 |
'''simple docstring'''
from __future__ import annotations
__lowerCAmelCase = [-1_0, -5, 0, 5, 5.1, 1_1, 1_3, 2_1, 3, 4, -2_1, -1_0, -5, -1, 0]
__lowerCAmelCase = [-5, 0, 5, 5.1, 1_1, 1_3, 2_1, -1, 4, -1, -1_0, -5, -1, 0, -1]
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : Optional[int] = []
_a : int = len(__a )
for i in range(__a ):
_a : float = -1
for j in range(i + 1 , __a ):
if arr[i] < arr[j]:
_a : Any = arr[j]
break
result.append(__a )
return result
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : Tuple = []
for i, outer in enumerate(__a ):
_a : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
_a : Dict = inner
break
result.append(__a )
return result
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : int = len(__a )
_a : list[float] = []
_a : list[float] = [-1] * arr_size
for index in reversed(range(__a ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
_a : Dict = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__lowerCAmelCase = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
| 271 | 1 |
from math import pow, sqrt
def __lowercase ( *__lowerCAmelCase : float ):
a__ = len(__lowerCAmelCase ) > 0 and all(value > 0.0 for value in values )
return result
def __lowercase ( __lowerCAmelCase : float , __lowerCAmelCase : float ):
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowerCAmelCase , __lowerCAmelCase )
else ValueError('Input Error: Molar mass values must greater than 0.' )
)
def __lowercase ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float ):
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def __lowercase ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float ):
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def __lowercase ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float ):
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def __lowercase ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float ):
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
| 370 |
def __lowercase ( __lowerCAmelCase : Optional[Any] ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __lowercase ( __lowerCAmelCase : dict[int, list[int]] ):
a__ = 0
a__ = len(__lowerCAmelCase ) # No of vertices in graph
a__ = [0] * n
a__ = [False] * n
def dfs(__lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ):
a__ = True
a__ = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , id_ )
a__ = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
a__ = min(low[at] , low[to] )
a__ = []
for i in range(__lowerCAmelCase ):
if not visited[i]:
dfs(__lowerCAmelCase , -1 , __lowerCAmelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109 | 0 |
from __future__ import annotations
from typing import Any
class lowerCAmelCase :
def __init__( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : float = 0 ) -> None:
lowerCamelCase__ , lowerCamelCase__ : str = row, column
lowerCamelCase__ : str = [[default_value for c in range(UpperCAmelCase )] for r in range(UpperCAmelCase )]
def __str__( self : str ) -> str:
lowerCamelCase__ : str = F"""Matrix consist of {self.row} rows and {self.column} columns\n"""
# Make string identifier
lowerCamelCase__ : Optional[int] = 0
for row_vector in self.array:
for obj in row_vector:
lowerCamelCase__ : List[Any] = max(UpperCAmelCase , len(str(UpperCAmelCase ) ) )
lowerCamelCase__ : List[Any] = F"""%{max_element_length}s"""
# Make string and return
def single_line(UpperCAmelCase : list[float] ) -> str:
nonlocal string_format_identifier
lowerCamelCase__ : List[Any] = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(UpperCAmelCase ) for row_vector in self.array )
return s
def __repr__( self : Optional[Any] ) -> str:
return str(self )
def A_ ( self : Any , UpperCAmelCase : tuple[int, int] ) -> bool:
if not (isinstance(UpperCAmelCase , (list, tuple) ) and len(UpperCAmelCase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Any , UpperCAmelCase : tuple[int, int] ) -> Any:
assert self.validate_indicies(UpperCAmelCase )
return self.array[loc[0]][loc[1]]
def __setitem__( self : str , UpperCAmelCase : tuple[int, int] , UpperCAmelCase : float ) -> None:
assert self.validate_indicies(UpperCAmelCase )
lowerCamelCase__ : Dict = value
def __add__( self : Optional[Any] , UpperCAmelCase : Matrix ) -> Matrix:
assert isinstance(UpperCAmelCase , UpperCAmelCase )
assert self.row == another.row and self.column == another.column
# Add
lowerCamelCase__ : List[Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCamelCase__ : Dict = self[r, c] + another[r, c]
return result
def __neg__( self : Dict ) -> Matrix:
lowerCamelCase__ : int = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCamelCase__ : List[str] = -self[r, c]
return result
def __sub__( self : List[Any] , UpperCAmelCase : Matrix ) -> Matrix:
return self + (-another)
def __mul__( self : Tuple , UpperCAmelCase : int | float | Matrix ) -> Matrix:
if isinstance(UpperCAmelCase , (int, float) ): # Scalar multiplication
lowerCamelCase__ : Union[str, Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCamelCase__ : Dict = self[r, c] * another
return result
elif isinstance(UpperCAmelCase , UpperCAmelCase ): # Matrix multiplication
assert self.column == another.row
lowerCamelCase__ : Optional[int] = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
lowerCamelCase__ : Any = F"""Unsupported type given for another ({type(UpperCAmelCase )})"""
raise TypeError(UpperCAmelCase )
def A_ ( self : Optional[Any] ) -> Matrix:
lowerCamelCase__ : str = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
lowerCamelCase__ : Tuple = self[r, c]
return result
def A_ ( self : Tuple , UpperCAmelCase : Matrix , UpperCAmelCase : Matrix ) -> Any:
assert isinstance(UpperCAmelCase , UpperCAmelCase ) and isinstance(UpperCAmelCase , UpperCAmelCase )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
lowerCamelCase__ : Any = v.transpose()
lowerCamelCase__ : Dict = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( ) -> None:
# a^(-1)
lowerCamelCase__ : str = Matrix(3 , 3 , 0 )
for i in range(3 ):
lowerCamelCase__ : str = 1
print(F"""a^(-1) is {ainv}""" )
# u, v
lowerCamelCase__ : Any = Matrix(3 , 1 , 0 )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = 1, 2, -3
lowerCamelCase__ : Tuple = Matrix(3 , 1 , 0 )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = 4, -2, 5
print(F"""u is {u}""" )
print(F"""v is {v}""" )
print(F"""uv^T is {u * v.transpose()}""" )
# Sherman Morrison
print(F"""(a + uv^T)^(-1) is {ainv.sherman_morrison(_UpperCAmelCase , _UpperCAmelCase )}""" )
def SCREAMING_SNAKE_CASE ( ) -> None:
import doctest
doctest.testmod()
testa()
| 50 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Optional[int] = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
a : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 265 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _snake_case ( unittest.TestCase ):
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=4 , ):
UpperCAmelCase__ : Union[str, Any] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Tuple = seq_length
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : Tuple = use_attention_mask
UpperCAmelCase__ : Optional[Any] = use_token_type_ids
UpperCAmelCase__ : Optional[Any] = use_labels
UpperCAmelCase__ : str = vocab_size
UpperCAmelCase__ : Dict = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : Optional[int] = num_attention_heads
UpperCAmelCase__ : Optional[int] = intermediate_size
UpperCAmelCase__ : Tuple = hidden_act
UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase__ : List[Any] = max_position_embeddings
UpperCAmelCase__ : Optional[int] = type_vocab_size
UpperCAmelCase__ : str = type_sequence_label_size
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : str = num_choices
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase__ : Union[str, Any] = None
if self.use_attention_mask:
UpperCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase__ : Dict = None
if self.use_token_type_ids:
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase__ : Optional[Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = config_and_inputs
UpperCAmelCase__ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _snake_case ( a__ , unittest.TestCase ):
lowerCAmelCase :Optional[int] = True
lowerCAmelCase :Any = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = FlaxRoFormerModelTester(self)
@slow
def snake_case__ ( self):
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_lowerCamelCase)
UpperCAmelCase__ : Dict = model(np.ones((1, 1)))
self.assertIsNotNone(_lowerCamelCase)
@require_flax
class _snake_case ( unittest.TestCase ):
@slow
def snake_case__ ( self):
UpperCAmelCase__ : Any = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""")
UpperCAmelCase__ : int = jnp.array([[0, 1, 2, 3, 4, 5]])
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase)[0]
UpperCAmelCase__ : Union[str, Any] = 5_0000
UpperCAmelCase__ : Any = (1, 6, vocab_size)
self.assertEqual(output.shape , _lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1e-4)) | 283 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class _snake_case ( a__ ):
lowerCAmelCase :torch.FloatTensor
class _snake_case ( a__ , a__ ):
@register_to_config
def __init__( self , _lowerCamelCase = 3 , _lowerCamelCase = 3 , _lowerCamelCase = ("DownEncoderBlock2D",) , _lowerCamelCase = ("UpDecoderBlock2D",) , _lowerCamelCase = (64,) , _lowerCamelCase = 1 , _lowerCamelCase = "silu" , _lowerCamelCase = 3 , _lowerCamelCase = 32 , _lowerCamelCase = 256 , _lowerCamelCase = 32 , _lowerCamelCase = None , _lowerCamelCase = 0.18215 , _lowerCamelCase = "group" , ):
super().__init__()
# pass init params to Encoder
UpperCAmelCase__ : str = Encoder(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , down_block_types=_lowerCamelCase , block_out_channels=_lowerCamelCase , layers_per_block=_lowerCamelCase , act_fn=_lowerCamelCase , norm_num_groups=_lowerCamelCase , double_z=_lowerCamelCase , )
UpperCAmelCase__ : Optional[Any] = vq_embed_dim if vq_embed_dim is not None else latent_channels
UpperCAmelCase__ : Any = nn.Convad(_lowerCamelCase , _lowerCamelCase , 1)
UpperCAmelCase__ : Optional[int] = VectorQuantizer(_lowerCamelCase , _lowerCamelCase , beta=0.25 , remap=_lowerCamelCase , sane_index_shape=_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = nn.Convad(_lowerCamelCase , _lowerCamelCase , 1)
# pass init params to Decoder
UpperCAmelCase__ : str = Decoder(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , up_block_types=_lowerCamelCase , block_out_channels=_lowerCamelCase , layers_per_block=_lowerCamelCase , act_fn=_lowerCamelCase , norm_num_groups=_lowerCamelCase , norm_type=_lowerCamelCase , )
@apply_forward_hook
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = True):
UpperCAmelCase__ : Union[str, Any] = self.encoder(_lowerCamelCase)
UpperCAmelCase__ : str = self.quant_conv(_lowerCamelCase)
if not return_dict:
return (h,)
return VQEncoderOutput(latents=_lowerCamelCase)
@apply_forward_hook
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = False , _lowerCamelCase = True):
# also go through quantization layer
if not force_not_quantize:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.quantize(_lowerCamelCase)
else:
UpperCAmelCase__ : Union[str, Any] = h
UpperCAmelCase__ : Any = self.post_quant_conv(_lowerCamelCase)
UpperCAmelCase__ : Any = self.decoder(_lowerCamelCase , quant if self.config.norm_type == """spatial""" else None)
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = True):
UpperCAmelCase__ : Dict = sample
UpperCAmelCase__ : Dict = self.encode(_lowerCamelCase).latents
UpperCAmelCase__ : List[str] = self.decode(_lowerCamelCase).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCamelCase) | 283 | 1 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE_ :
def __init__( self : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int]=13 , lowerCamelCase_ : int=32 , lowerCamelCase_ : int=3 , lowerCamelCase_ : Optional[int]=4 , lowerCamelCase_ : Optional[Any]=[10, 20, 30, 40] , lowerCamelCase_ : List[str]=[2, 2, 3, 2] , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Optional[int]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : Optional[Any]=10 , lowerCamelCase_ : Optional[int]=0.0_2 , lowerCamelCase_ : Any=["stage2", "stage3", "stage4"] , lowerCamelCase_ : int=[2, 3, 4] , lowerCamelCase_ : List[Any]=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = num_channels
UpperCamelCase = num_stages
UpperCamelCase = hidden_sizes
UpperCamelCase = depths
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = num_labels
UpperCamelCase = initializer_range
UpperCamelCase = out_features
UpperCamelCase = out_indices
UpperCamelCase = scope
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Dict ):
"""simple docstring"""
UpperCamelCase = ConvNextVaModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase = model(lowerCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase = ConvNextVaForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = ConvNextVaBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase = model(lowerCamelCase__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCamelCase = None
UpperCamelCase = ConvNextVaBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__lowerCAmelCase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
__lowerCAmelCase = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = ConvNextVaModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCamelCase = True
if model_class.__name__ in [
*get_values(lowerCamelCase__ ),
*get_values(lowerCamelCase__ ),
]:
continue
UpperCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
UpperCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase = model(**lowerCamelCase__ ).loss
loss.backward()
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCamelCase = False
UpperCamelCase = True
if (
model_class.__name__
in [*get_values(lowerCamelCase__ ), *get_values(lowerCamelCase__ )]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase = model(**lowerCamelCase__ ).loss
loss.backward()
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase__ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] ):
UpperCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = ConvNextVaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowercase( ) -> Dict:
'''simple docstring'''
UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(lowerCamelCase__ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = preprocessor(images=lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**lowerCamelCase__ )
# verify the logits
UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
UpperCamelCase = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
| 343 |
'''simple docstring'''
def _A ( ):
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
__A = generate_large_matrix()
__A = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def _A ( lowercase__ ):
assert all(row == sorted(lowercase__ , reverse=lowercase__ ) for row in grid )
assert all(list(lowercase__ ) == sorted(lowercase__ , reverse=lowercase__ ) for col in zip(*lowercase__ ) )
def _A ( lowercase__ ):
lowercase__ = 0
lowercase__ = len(lowercase__ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
lowercase__ = (left + right) // 2
lowercase__ = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
lowercase__ = mid + 1
else:
lowercase__ = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase__ )
def _A ( lowercase__ ):
lowercase__ = 0
lowercase__ = len(grid[0] )
for i in range(len(lowercase__ ) ):
lowercase__ = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase__ ) * len(grid[0] )) - total
def _A ( lowercase__ ):
return len([number for row in grid for number in row if number < 0] )
def _A ( lowercase__ ):
lowercase__ = 0
for row in grid:
for i, number in enumerate(lowercase__ ):
if number < 0:
total += len(lowercase__ ) - i
break
return total
def _A ( ):
from timeit import timeit
print("""Running benchmarks""" )
lowercase__ = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
lowercase__ = timeit(f'''{func}(grid=grid)''' , setup=lowercase__ , number=500 )
print(f'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 164 | 0 |
'''simple docstring'''
def a__ ( lowercase : list ) -> float:
"""simple docstring"""
_UpperCamelCase = 0
while len(lowerCAmelCase__ ) > 1:
_UpperCamelCase = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
_UpperCamelCase = files.index(min(lowerCAmelCase__ ) )
temp += files[min_index]
files.pop(lowerCAmelCase__ )
files.append(lowerCAmelCase__ )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
lowercase__ : List[Any] = parse(importlib.metadata.version('torch'))
def a__ ( lowercase : Union[str, Version], lowercase : str, lowercase : str ) -> List[str]:
"""simple docstring"""
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
_UpperCamelCase = STR_OPERATION_TO_FUNC[operation]
if isinstance(lowercase, lowercase ):
_UpperCamelCase = parse(importlib.metadata.version(lowercase ) )
return operation(lowercase, parse(lowercase ) )
def a__ ( lowercase : str, lowercase : str ) -> List[Any]:
"""simple docstring"""
return compare_versions(lowercase, lowercase, lowercase )
| 287 | 0 |
from torch import nn
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f"""Unsupported activation function: {act_fn}""" )
| 43 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
"""This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."""
"""It takes two arguments named `image` which should be the original image, and `label` which should be a text """
"""describing the elements what should be identified in the segmentation mask. The tool returns the mask."""
)
_SCREAMING_SNAKE_CASE = """CIDAS/clipseg-rd64-refined"""
_SCREAMING_SNAKE_CASE = """image_segmenter"""
_SCREAMING_SNAKE_CASE = CLIPSegForImageSegmentation
_SCREAMING_SNAKE_CASE = ["""image""", """text"""]
_SCREAMING_SNAKE_CASE = ["""image"""]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
requires_backends(self , ['vision'] )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : "Image" , SCREAMING_SNAKE_CASE_ : str ):
return self.pre_processor(text=[label] , images=[image] , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : int ):
with torch.no_grad():
lowerCAmelCase_ : List[str] = self.model(**SCREAMING_SNAKE_CASE_ ).logits
return logits
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCAmelCase_ : Dict = outputs.cpu().detach().numpy()
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Optional[Any] = 1
return Image.fromarray((array * 2_5_5).astype(np.uinta ) )
| 224 | 0 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
_lowerCamelCase : Dict = logging.getLogger(__name__)
class lowercase ( a ):
lowercase__ : Optional[int] = """token-classification"""
def __init__( self : Any , _UpperCamelCase : List[Any] ) -> List[str]:
'''simple docstring'''
if type(_UpperCamelCase ) == dict:
SCREAMING_SNAKE_CASE = Namespace(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = import_module("tasks" )
try:
SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , hparams.task_type )
SCREAMING_SNAKE_CASE = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
F"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
SCREAMING_SNAKE_CASE = self.token_classification_task.get_labels(hparams.labels )
SCREAMING_SNAKE_CASE = CrossEntropyLoss().ignore_index
super().__init__(_UpperCamelCase , len(self.labels ) , self.mode )
def __snake_case( self : str , **_UpperCamelCase : List[str] ) -> Any:
'''simple docstring'''
return self.model(**_UpperCamelCase )
def __snake_case( self : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
SCREAMING_SNAKE_CASE = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
SCREAMING_SNAKE_CASE = self(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def __snake_case( self : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.hparams
for mode in ["train", "dev", "test"]:
SCREAMING_SNAKE_CASE = self._feature_file(_UpperCamelCase )
if os.path.exists(_UpperCamelCase ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
SCREAMING_SNAKE_CASE = self.token_classification_task.read_examples_from_file(args.data_dir , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self.token_classification_task.convert_examples_to_features(
_UpperCamelCase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=_UpperCamelCase , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , _UpperCamelCase )
torch.save(_UpperCamelCase , _UpperCamelCase )
def __snake_case( self : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : bool = False ) -> DataLoader:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self._feature_file(_UpperCamelCase )
logger.info("Loading features from cached file %s" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
SCREAMING_SNAKE_CASE = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
SCREAMING_SNAKE_CASE = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
SCREAMING_SNAKE_CASE = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
SCREAMING_SNAKE_CASE = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , batch_size=_UpperCamelCase )
def __snake_case( self : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[Any] ) -> int:
'''simple docstring'''
"""Compute validation""" ""
SCREAMING_SNAKE_CASE = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
SCREAMING_SNAKE_CASE = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
SCREAMING_SNAKE_CASE = self(**_UpperCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = outputs[:2]
SCREAMING_SNAKE_CASE = logits.detach().cpu().numpy()
SCREAMING_SNAKE_CASE = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __snake_case( self : List[Any] , _UpperCamelCase : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.stack([x["val_loss"] for x in outputs] ).mean()
SCREAMING_SNAKE_CASE = np.concatenate([x["pred"] for x in outputs] , axis=0 )
SCREAMING_SNAKE_CASE = np.argmax(_UpperCamelCase , axis=2 )
SCREAMING_SNAKE_CASE = np.concatenate([x["target"] for x in outputs] , axis=0 )
SCREAMING_SNAKE_CASE = dict(enumerate(self.labels ) )
SCREAMING_SNAKE_CASE = [[] for _ in range(out_label_ids.shape[0] )]
SCREAMING_SNAKE_CASE = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
SCREAMING_SNAKE_CASE = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(_UpperCamelCase , _UpperCamelCase ),
"precision": precision_score(_UpperCamelCase , _UpperCamelCase ),
"recall": recall_score(_UpperCamelCase , _UpperCamelCase ),
"f1": fa_score(_UpperCamelCase , _UpperCamelCase ),
}
SCREAMING_SNAKE_CASE = dict(results.items() )
SCREAMING_SNAKE_CASE = results
return ret, preds_list, out_label_list
def __snake_case( self : Union[str, Any] , _UpperCamelCase : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._eval_end(_UpperCamelCase )
SCREAMING_SNAKE_CASE = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __snake_case( self : Union[str, Any] , _UpperCamelCase : List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._eval_end(_UpperCamelCase )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
SCREAMING_SNAKE_CASE = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __snake_case( _UpperCamelCase : Any , _UpperCamelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
BaseTransformer.add_model_specific_args(_UpperCamelCase , _UpperCamelCase )
parser.add_argument(
"--task_type" , default="NER" , type=_UpperCamelCase , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length" , default=128 , type=_UpperCamelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=_UpperCamelCase , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=_UpperCamelCase , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
_lowerCamelCase : str = NERTransformer.add_model_specific_args(parser, os.getcwd())
_lowerCamelCase : Optional[int] = parser.parse_args()
_lowerCamelCase : Optional[int] = NERTransformer(args)
_lowerCamelCase : List[str] = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
_lowerCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
_lowerCamelCase : List[Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 206 | import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( a ):
lowercase__ : Dict = (UniPCMultistepScheduler,)
lowercase__ : Optional[int] = (("""num_inference_steps""", 25),)
def __snake_case( self : List[str] , **_UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**_UpperCamelCase )
return config
def __snake_case( self : List[str] , _UpperCamelCase : Dict=0 , **_UpperCamelCase : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_sample
SCREAMING_SNAKE_CASE = 0.1 * sample
SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_UpperCamelCase )
new_scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = sample, sample
for t in range(_UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
SCREAMING_SNAKE_CASE = new_scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __snake_case( self : Any , _UpperCamelCase : Union[str, Any]=0 , **_UpperCamelCase : Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_sample
SCREAMING_SNAKE_CASE = 0.1 * sample
SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
SCREAMING_SNAKE_CASE = new_scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __snake_case( self : List[str] , _UpperCamelCase : Tuple=None , **_UpperCamelCase : List[Any] ) -> str:
'''simple docstring'''
if scheduler is None:
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = 10
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).prev_sample
return sample
def __snake_case( self : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps" , _UpperCamelCase )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_sample
SCREAMING_SNAKE_CASE = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCamelCase , "set_timesteps" ):
scheduler.set_timesteps(_UpperCamelCase )
elif num_inference_steps is not None and not hasattr(_UpperCamelCase , "set_timesteps" ):
SCREAMING_SNAKE_CASE = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
SCREAMING_SNAKE_CASE = dummy_past_residuals[: scheduler.config.solver_order]
SCREAMING_SNAKE_CASE = scheduler.timesteps[5]
SCREAMING_SNAKE_CASE = scheduler.timesteps[6]
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __snake_case( self : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = UniPCMultistepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE = self.full_loop(scheduler=_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
SCREAMING_SNAKE_CASE = DPMSolverSinglestepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE = DEISMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE = UniPCMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE = self.full_loop(scheduler=_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
def __snake_case( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase )
def __snake_case( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.check_over_configs(thresholding=_UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_UpperCamelCase , prediction_type=_UpperCamelCase , sample_max_value=_UpperCamelCase , solver_order=_UpperCamelCase , solver_type=_UpperCamelCase , )
def __snake_case( self : Tuple ) -> int:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase )
def __snake_case( self : Dict ) -> int:
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_UpperCamelCase , solver_type=_UpperCamelCase , prediction_type=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = self.full_loop(
solver_order=_UpperCamelCase , solver_type=_UpperCamelCase , prediction_type=_UpperCamelCase , )
assert not torch.isnan(_UpperCamelCase ).any(), "Samples have nan numbers"
def __snake_case( self : List[str] ) -> Tuple:
'''simple docstring'''
self.check_over_configs(lower_order_final=_UpperCamelCase )
self.check_over_configs(lower_order_final=_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=_UpperCamelCase , time_step=0 )
def __snake_case( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.full_loop()
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
def __snake_case( self : List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.full_loop(prediction_type="v_prediction" )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1e-3
def __snake_case( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(thresholding=_UpperCamelCase , dynamic_thresholding_ratio=0 )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = 10
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter.half()
scheduler.set_timesteps(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).prev_sample
assert sample.dtype == torch.floataa
def __snake_case( self : List[str] , **_UpperCamelCase : Dict ) -> Union[str, Any]:
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 206 | 1 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''detr'''
UpperCAmelCase__ = ['''past_key_values''']
UpperCAmelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Dict , UpperCAmelCase__ : str=True , UpperCAmelCase__ : int=None , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : List[str]=100 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : Any=2_048 , UpperCAmelCase__ : str=8 , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : Any=2_048 , UpperCAmelCase__ : Any=8 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Optional[Any]="relu" , UpperCAmelCase__ : int=256 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : Optional[int]=0.0 , UpperCAmelCase__ : Optional[Any]=0.02 , UpperCAmelCase__ : Optional[int]=1.0 , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : Optional[int]="sine" , UpperCAmelCase__ : Union[str, Any]="resnet50" , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Optional[Any]=1 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : Optional[Any]=1 , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : str=0.1 , **UpperCAmelCase__ : str , ) ->List[Any]:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
A__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__):
A__ = backbone_config.get('''model_type''')
A__ = CONFIG_MAPPING[backbone_model_type]
A__ = config_class.from_dict(UpperCAmelCase__)
# set timm attributes to None
A__ , A__ , A__ = None, None, None
A__ = use_timm_backbone
A__ = backbone_config
A__ = num_channels
A__ = num_queries
A__ = d_model
A__ = encoder_ffn_dim
A__ = encoder_layers
A__ = encoder_attention_heads
A__ = decoder_ffn_dim
A__ = decoder_layers
A__ = decoder_attention_heads
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = activation_function
A__ = init_std
A__ = init_xavier_std
A__ = encoder_layerdrop
A__ = decoder_layerdrop
A__ = encoder_layers
A__ = auxiliary_loss
A__ = position_embedding_type
A__ = backbone
A__ = use_pretrained_backbone
A__ = dilation
# Hungarian matcher
A__ = class_cost
A__ = bbox_cost
A__ = giou_cost
# Loss coefficients
A__ = mask_loss_coefficient
A__ = dice_loss_coefficient
A__ = bbox_loss_coefficient
A__ = giou_loss_coefficient
A__ = eos_coefficient
super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__)
@property
def SCREAMING_SNAKE_CASE ( self : Any) ->int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE ( self : Any) ->int:
'''simple docstring'''
return self.d_model
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Optional[Any]) ->str:
'''simple docstring'''
return cls(backbone_config=UpperCAmelCase__ , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Dict[str, any]:
'''simple docstring'''
A__ = copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
A__ = self.backbone_config.to_dict()
A__ = self.__class__.model_type
return output
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE ( self : str) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
])
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->float:
'''simple docstring'''
return 1e-5
@property
def SCREAMING_SNAKE_CASE ( self : str) ->int:
'''simple docstring'''
return 12
| 14 |
A_ : List[Any] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
A_ : int = ['a', 'b', 'c', 'd', 'e']
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = start
# add current to visited
visited.append(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__UpperCAmelCase = topological_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# if all neighbors visited add current to sort
sort.append(SCREAMING_SNAKE_CASE )
# if all vertices haven't been visited select a new one to visit
if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ):
for vertice in vertices:
if vertice not in visited:
__UpperCAmelCase = topological_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# return sort
return sort
if __name__ == "__main__":
A_ : Tuple = topological_sort('a', [], [])
print(sort)
| 333 | 0 |
'''simple docstring'''
import string
def _A ( _lowerCAmelCase ):
"""simple docstring"""
for key in range(len(string.ascii_uppercase ) ):
__lowercase =''
for symbol in message:
if symbol in string.ascii_uppercase:
__lowercase =string.ascii_uppercase.find(_lowerCAmelCase )
__lowercase =num - key
if num < 0:
__lowercase =num + len(string.ascii_uppercase )
__lowercase =translated + string.ascii_uppercase[num]
else:
__lowercase =translated + symbol
print(f"""Decryption using Key #{key}: {translated}""" )
def _A ( ):
"""simple docstring"""
__lowercase =input('Encrypted message: ' )
__lowercase =message.upper()
decrypt(_lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 48 |
'''simple docstring'''
from __future__ import annotations
import requests
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase =f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
return requests.get(_lowerCAmelCase ).json()
def _A ( _lowerCAmelCase = 10 ):
"""simple docstring"""
__lowercase ='https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
__lowercase =requests.get(_lowerCAmelCase ).json()[:max_stories]
return [get_hackernews_story(_lowerCAmelCase ) for story_id in story_ids]
def _A ( _lowerCAmelCase = 10 ):
"""simple docstring"""
__lowercase =hackernews_top_stories(_lowerCAmelCase )
return "\n".join('* [{title}]({url})'.format(**_lowerCAmelCase ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 48 | 1 |
import math
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> str:
"""simple docstring"""
return math.sqrt(_A ) * math.sqrt(_A ) == num
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = n
while left <= right:
SCREAMING_SNAKE_CASE__ = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
SCREAMING_SNAKE_CASE__ = mid - 1
else:
SCREAMING_SNAKE_CASE__ = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 219 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A ( __UpperCAmelCase ):
__snake_case = (UnCLIPScheduler,)
def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**UpperCamelCase__ )
return config
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=UpperCamelCase__, prev_timestep=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.scheduler_classes[0]
lowerCAmelCase_ = self.get_scheduler_config(variance_type='''fixed_small_log''' )
lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.scheduler_classes[0]
lowerCAmelCase_ = self.get_scheduler_config(variance_type='''learned_range''' )
lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ )
lowerCAmelCase_ = 0.5
assert scheduler._get_variance(1, predicted_variance=UpperCamelCase__ ) - -10.1_712_790 < 1E-5
assert scheduler._get_variance(487, predicted_variance=UpperCamelCase__ ) - -5.7_998_052 < 1E-5
assert scheduler._get_variance(999, predicted_variance=UpperCamelCase__ ) - -0.0_010_011 < 1E-5
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.scheduler_classes[0]
lowerCAmelCase_ = self.get_scheduler_config()
lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ )
lowerCAmelCase_ = scheduler.timesteps
lowerCAmelCase_ = self.dummy_model()
lowerCAmelCase_ = self.dummy_sample_deter
lowerCAmelCase_ = torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase__ ):
# 1. predict noise residual
lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase_ = scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, generator=UpperCamelCase__ ).prev_sample
lowerCAmelCase_ = pred_prev_sample
lowerCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
lowerCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.scheduler_classes[0]
lowerCAmelCase_ = self.get_scheduler_config()
lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(25 )
lowerCAmelCase_ = scheduler.timesteps
lowerCAmelCase_ = self.dummy_model()
lowerCAmelCase_ = self.dummy_sample_deter
lowerCAmelCase_ = torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase__ ):
# 1. predict noise residual
lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ )
if i + 1 == timesteps.shape[0]:
lowerCAmelCase_ = None
else:
lowerCAmelCase_ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowerCAmelCase_ = scheduler.step(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, prev_timestep=UpperCamelCase__, generator=UpperCamelCase__ ).prev_sample
lowerCAmelCase_ = pred_prev_sample
lowerCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
lowerCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
| 278 | 0 |
"""simple docstring"""
from math import factorial, pi
def __snake_case ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : int = 30 ) -> float:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ):
raise ValueError("maclaurin_sin() requires either an int or float for theta" )
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or accuracy <= 0:
raise ValueError("maclaurin_sin() requires a positive int for accuracy" )
_UpperCAmelCase : int = float(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : int = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(SCREAMING_SNAKE_CASE__ ) )
def __snake_case ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : int = 30 ) -> float:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ):
raise ValueError("maclaurin_cos() requires either an int or float for theta" )
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or accuracy <= 0:
raise ValueError("maclaurin_cos() requires a positive int for accuracy" )
_UpperCAmelCase : Tuple = float(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Dict = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 202 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_lowerCAmelCase : List[str] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def __snake_case ( SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
_UpperCAmelCase : str = k.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return k
def __snake_case ( SCREAMING_SNAKE_CASE__ : dict , SCREAMING_SNAKE_CASE__ : dict ) -> PegasusForConditionalGeneration:
'''simple docstring'''
_UpperCAmelCase : List[Any] = DEFAULTS.copy()
cfg_kwargs.update(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = PegasusConfig(**SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = PegasusForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[int] = torch_model.model.state_dict()
_UpperCAmelCase : Union[str, Any] = {}
for k, v in tf_weights.items():
_UpperCAmelCase : Union[str, Any] = rename_state_dict_key(SCREAMING_SNAKE_CASE__ )
if new_k not in sd:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if "dense" in k or "proj" in new_k:
_UpperCAmelCase : Any = v.T
_UpperCAmelCase : str = torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
# make sure embedding.padding_idx is respected
_UpperCAmelCase : Tuple = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] )
_UpperCAmelCase : Any = mapping["shared.weight"]
_UpperCAmelCase : Dict = mapping["shared.weight"]
_UpperCAmelCase : Dict = {k: torch.zeros_like(SCREAMING_SNAKE_CASE__ ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping}
mapping.update(**SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = torch_model.model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : int = [
k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def __snake_case ( SCREAMING_SNAKE_CASE__ : Dict="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = tf.train.list_variables(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Union[str, Any] = {}
_UpperCAmelCase : Optional[Any] = ["Adafactor", "global_step"]
for name, shape in tqdm(SCREAMING_SNAKE_CASE__ , desc="converting tf checkpoint to dict" ):
_UpperCAmelCase : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
_UpperCAmelCase : int = tf.train.load_variable(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Dict = array
return tf_weights
def __snake_case ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Dict = Path(SCREAMING_SNAKE_CASE__ ).parent.name
_UpperCAmelCase : Tuple = task_specific_params[f'summarization_{dataset}']["max_position_embeddings"]
_UpperCAmelCase : Dict = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=SCREAMING_SNAKE_CASE__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(SCREAMING_SNAKE_CASE__ )
# convert model
_UpperCAmelCase : Union[str, Any] = get_tf_weights_as_numpy(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = task_specific_params[f'summarization_{dataset}']
if dataset == "large":
_UpperCAmelCase : Optional[int] = task_specific_params
_UpperCAmelCase : str = convert_pegasus(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
torch_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = torch_model.state_dict()
sd.pop("model.decoder.embed_positions.weight" )
sd.pop("model.encoder.embed_positions.weight" )
torch.save(SCREAMING_SNAKE_CASE__ , Path(SCREAMING_SNAKE_CASE__ ) / "pytorch_model.bin" )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
if args.save_dir is None:
_lowerCAmelCase : Tuple = Path(args.tf_ckpt_path).parent.name
_lowerCAmelCase : Dict = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 202 | 1 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class UpperCamelCase ( lowercase_ , lowercase_ ):
@register_to_config
def __init__( self ,__UpperCamelCase = 768 ,) -> List[str]:
'''simple docstring'''
super().__init__()
lowercase_ : Any = nn.Parameter(torch.zeros(1 ,__UpperCamelCase ) )
lowercase_ : Tuple = nn.Parameter(torch.ones(1 ,__UpperCamelCase ) )
def _UpperCAmelCase ( self ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> Any:
'''simple docstring'''
lowercase_ : Optional[Any] = nn.Parameter(self.mean.to(__UpperCamelCase ).to(__UpperCamelCase ) )
lowercase_ : Optional[Any] = nn.Parameter(self.std.to(__UpperCamelCase ).to(__UpperCamelCase ) )
return self
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ : Optional[int] = (embeds - self.mean) * 1.0 / self.std
return embeds
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
lowercase_ : Optional[int] = (embeds * self.std) + self.mean
return embeds
| 213 | """simple docstring"""
import argparse
import struct
import unittest
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ) -> None:
'''simple docstring'''
lowercase_ : str = data
# Initialize hash values
lowercase_ : Optional[int] = [
0X6_A_0_9_E_6_6_7,
0XB_B_6_7_A_E_8_5,
0X3_C_6_E_F_3_7_2,
0XA_5_4_F_F_5_3_A,
0X5_1_0_E_5_2_7_F,
0X9_B_0_5_6_8_8_C,
0X1_F_8_3_D_9_A_B,
0X5_B_E_0_C_D_1_9,
]
# Initialize round constants
lowercase_ : Tuple = [
0X4_2_8_A_2_F_9_8,
0X7_1_3_7_4_4_9_1,
0XB_5_C_0_F_B_C_F,
0XE_9_B_5_D_B_A_5,
0X3_9_5_6_C_2_5_B,
0X5_9_F_1_1_1_F_1,
0X9_2_3_F_8_2_A_4,
0XA_B_1_C_5_E_D_5,
0XD_8_0_7_A_A_9_8,
0X1_2_8_3_5_B_0_1,
0X2_4_3_1_8_5_B_E,
0X5_5_0_C_7_D_C_3,
0X7_2_B_E_5_D_7_4,
0X8_0_D_E_B_1_F_E,
0X9_B_D_C_0_6_A_7,
0XC_1_9_B_F_1_7_4,
0XE_4_9_B_6_9_C_1,
0XE_F_B_E_4_7_8_6,
0X0_F_C_1_9_D_C_6,
0X2_4_0_C_A_1_C_C,
0X2_D_E_9_2_C_6_F,
0X4_A_7_4_8_4_A_A,
0X5_C_B_0_A_9_D_C,
0X7_6_F_9_8_8_D_A,
0X9_8_3_E_5_1_5_2,
0XA_8_3_1_C_6_6_D,
0XB_0_0_3_2_7_C_8,
0XB_F_5_9_7_F_C_7,
0XC_6_E_0_0_B_F_3,
0XD_5_A_7_9_1_4_7,
0X0_6_C_A_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_B_7_0_A_8_5,
0X2_E_1_B_2_1_3_8,
0X4_D_2_C_6_D_F_C,
0X5_3_3_8_0_D_1_3,
0X6_5_0_A_7_3_5_4,
0X7_6_6_A_0_A_B_B,
0X8_1_C_2_C_9_2_E,
0X9_2_7_2_2_C_8_5,
0XA_2_B_F_E_8_A_1,
0XA_8_1_A_6_6_4_B,
0XC_2_4_B_8_B_7_0,
0XC_7_6_C_5_1_A_3,
0XD_1_9_2_E_8_1_9,
0XD_6_9_9_0_6_2_4,
0XF_4_0_E_3_5_8_5,
0X1_0_6_A_A_0_7_0,
0X1_9_A_4_C_1_1_6,
0X1_E_3_7_6_C_0_8,
0X2_7_4_8_7_7_4_C,
0X3_4_B_0_B_C_B_5,
0X3_9_1_C_0_C_B_3,
0X4_E_D_8_A_A_4_A,
0X5_B_9_C_C_A_4_F,
0X6_8_2_E_6_F_F_3,
0X7_4_8_F_8_2_E_E,
0X7_8_A_5_6_3_6_F,
0X8_4_C_8_7_8_1_4,
0X8_C_C_7_0_2_0_8,
0X9_0_B_E_F_F_F_A,
0XA_4_5_0_6_C_E_B,
0XB_E_F_9_A_3_F_7,
0XC_6_7_1_7_8_F_2,
]
lowercase_ : Tuple = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _UpperCAmelCase ( __UpperCamelCase ) -> bytes:
'''simple docstring'''
lowercase_ : str = B'\x80' + (B'\x00' * (63 - (len(__UpperCamelCase ) + 8) % 64))
lowercase_ : str = struct.pack('>Q' ,(len(__UpperCamelCase ) * 8) )
return data + padding + big_endian_integer
def _UpperCAmelCase ( self ) -> None:
'''simple docstring'''
lowercase_ : Optional[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
lowercase_ : Any = list(struct.unpack('>16L' ,__UpperCamelCase ) )
# add 48 0-ed integers
words += [0] * 48
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[int] = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
lowercase_ : str = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
lowercase_ : int = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
lowercase_ : Optional[Any] = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X1_0_0_0_0_0_0_0_0
# Compression
lowercase_ : Tuple = self.ror(__UpperCamelCase ,6 ) ^ self.ror(__UpperCamelCase ,11 ) ^ self.ror(__UpperCamelCase ,25 )
lowercase_ : Union[str, Any] = (e & f) ^ ((~e & 0XF_F_F_F_F_F_F_F) & g)
lowercase_ : str = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_0_0_0_0_0_0_0_0
lowercase_ : Optional[int] = self.ror(__UpperCamelCase ,2 ) ^ self.ror(__UpperCamelCase ,13 ) ^ self.ror(__UpperCamelCase ,22 )
lowercase_ : Optional[Any] = (a & b) ^ (a & c) ^ (b & c)
lowercase_ : Any = (sa + maj) % 0X1_0_0_0_0_0_0_0_0
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = (
g,
f,
e,
((d + tempa) % 0X1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0),
)
lowercase_ : str = [a, b, c, d, e, f, g, h]
# Modify final values
lowercase_ : Dict = [
((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
lowercase_ : Any = ''.join([hex(__UpperCamelCase )[2:].zfill(8 ) for value in self.hashes] )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> int:
'''simple docstring'''
return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> None:
'''simple docstring'''
import hashlib
lowercase_ : Union[str, Any] = bytes('Test String' ,'utf-8' )
self.assertEqual(SHAaaa(__UpperCamelCase ).hash ,hashlib.shaaaa(__UpperCamelCase ).hexdigest() )
def lowercase__( ):
import doctest
doctest.testmod()
lowercase_ : Tuple = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
lowercase_ : Any = parser.parse_args()
lowercase_ : int = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
lowercase_ : str = f.read()
else:
lowercase_ : Optional[int] = bytes(__SCREAMING_SNAKE_CASE , 'utf-8' )
print(SHAaaa(__SCREAMING_SNAKE_CASE ).hash )
if __name__ == "__main__":
main()
| 213 | 1 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def lowerCamelCase ( UpperCAmelCase__ : Dict ) -> List[str]:
lowercase_ : Tuple = args.pruning_method
lowercase_ : Dict = args.threshold
lowercase_ : Tuple = args.model_name_or_path.rstrip("""/""" )
lowercase_ : int = args.target_model_path
print(F'''Load fine-pruned model from {model_name_or_path}''' )
lowercase_ : int = torch.load(os.path.join(UpperCAmelCase__ , """pytorch_model.bin""" ) )
lowercase_ : Dict = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowercase_ : Optional[Any] = tensor
print(F'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
lowercase_ : Any = tensor
print(F'''Copied layer {name}''' )
elif "bias" in name:
lowercase_ : int = tensor
print(F'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
lowercase_ : List[Any] = MagnitudeBinarizer.apply(inputs=UpperCAmelCase__ , threshold=UpperCAmelCase__ )
lowercase_ : int = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowercase_ : List[str] = name[:-6]
lowercase_ : str = model[F'''{prefix_}mask_scores''']
lowercase_ : List[Any] = TopKBinarizer.apply(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : str = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowercase_ : int = name[:-6]
lowercase_ : List[str] = model[F'''{prefix_}mask_scores''']
lowercase_ : Optional[int] = ThresholdBinarizer.apply(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : List[Any] = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowercase_ : Tuple = name[:-6]
lowercase_ : List[Any] = model[F'''{prefix_}mask_scores''']
lowercase_ , lowercase_ : List[str] = -0.1, 1.1
lowercase_ : Optional[Any] = torch.sigmoid(UpperCAmelCase__ )
lowercase_ : int = s * (r - l) + l
lowercase_ : Dict = s_bar.clamp(min=0.0 , max=1.0 )
lowercase_ : Union[str, Any] = tensor * mask
print(F'''Pruned layer {name}''' )
else:
raise ValueError("""Unknown pruning method""" )
if target_model_path is None:
lowercase_ : Optional[int] = os.path.join(
os.path.dirname(UpperCAmelCase__ ) , F'''bertarized_{os.path.basename(UpperCAmelCase__ )}''' )
if not os.path.isdir(UpperCAmelCase__ ):
shutil.copytree(UpperCAmelCase__ , UpperCAmelCase__ )
print(F'''\nCreated folder {target_model_path}''' )
torch.save(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , """pytorch_model.bin""" ) )
print("""\nPruned model saved! See you later!""" )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
_lowercase : str = parser.parse_args()
main(args)
| 21 | '''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __magic_name__ ( unittest.TestCase):
@parameterized.expand([(None,), ("""foo.json""",)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str ):
lowercase_ : Union[str, Any] = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ , config_name=lowercase_ )
lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , lowercase_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : int = AutoConfig.from_pretrained("""gpt2""" )
lowercase_ : List[Any] = GenerationConfig.from_model_config(lowercase_ )
lowercase_ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(lowercase_ , lowercase_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Optional[int] = GenerationConfig()
lowercase_ : int = {
"""max_new_tokens""": 1024,
"""foo""": """bar""",
}
lowercase_ : List[str] = copy.deepcopy(lowercase_ )
lowercase_ : Tuple = generation_config.update(**lowercase_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(lowercase_ , lowercase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(lowercase_ , {"""foo""": """bar"""} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Dict = GenerationConfig()
lowercase_ : int = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(lowercase_ )
lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
lowercase_ : List[str] = GenerationConfig.from_model_config(lowercase_ )
assert not hasattr(lowercase_ , """foo""" ) # no new kwargs should be initialized if from config
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Optional[int] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , lowercase_ )
self.assertEqual(default_config.num_beams , 1 )
lowercase_ : Dict = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , lowercase_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ )
lowercase_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , lowercase_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __magic_name__ ( unittest.TestCase):
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any ):
lowercase_ : int = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Tuple = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
lowercase_ : List[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowercase_ , repo_id="""test-generation-config""" , push_to_hub=lowercase_ , use_auth_token=self._token )
lowercase_ : int = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
lowercase_ : Optional[Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowercase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowercase_ , use_auth_token=self._token )
lowercase_ : int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
| 21 | 1 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 66 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
SCREAMING_SNAKE_CASE = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase_ :
lowercase__ = field(
default=A_, metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(A_ )} )
lowercase__ = field(
default=A_, metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
lowercase__ = field(
default=1_28, metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
}, )
lowercase__ = field(
default=1_28, metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''}, )
lowercase__ = field(
default=64, metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
}, )
lowercase__ = field(
default=30, metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
}, )
lowercase__ = field(
default=A_, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
lowercase__ = field(
default=0.0, metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
lowercase__ = field(
default=20, metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
lowercase__ = field(
default=0, metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
}, )
lowercase__ = field(default=1, metadata={'''help''': '''multiple threads for converting example to features'''} )
class UpperCAmelCase_ ( A_ ):
lowercase__ = '''train'''
lowercase__ = '''dev'''
class UpperCAmelCase_ ( A_ ):
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
def __init__( self : List[Any] , snake_case_ : SquadDataTrainingArguments , snake_case_ : PreTrainedTokenizer , snake_case_ : Optional[int] = None , snake_case_ : Union[str, Split] = Split.train , snake_case_ : Optional[bool] = False , snake_case_ : Optional[str] = None , snake_case_ : Optional[str] = "pt" , ) -> Union[str, Any]:
'''simple docstring'''
A__ = args
A__ = is_language_sensitive
A__ = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(snake_case_ , snake_case_ ):
try:
A__ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
A__ = mode
# Load data features from cache or dataset file
A__ = "v2" if args.version_2_with_negative else "v1"
A__ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A__ = cached_features_file + ".lock"
with FileLock(snake_case_ ):
if os.path.exists(snake_case_ ) and not args.overwrite_cache:
A__ = time.time()
A__ = torch.load(snake_case_ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
A__ = self.old_features["features"]
A__ = self.old_features.get("dataset" , snake_case_ )
A__ = self.old_features.get("examples" , snake_case_ )
logger.info(
F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
" future run" )
else:
if mode == Split.dev:
A__ = self.processor.get_dev_examples(args.data_dir )
else:
A__ = self.processor.get_train_examples(args.data_dir )
A__, A__ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=snake_case_ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=snake_case_ , )
A__ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , snake_case_ , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return len(self.features )
def __getitem__( self : Union[str, Any] , snake_case_ : Any ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
A__ = self.features[i]
A__ = torch.tensor(feature.input_ids , dtype=torch.long )
A__ = torch.tensor(feature.attention_mask , dtype=torch.long )
A__ = torch.tensor(feature.token_type_ids , dtype=torch.long )
A__ = torch.tensor(feature.cls_index , dtype=torch.long )
A__ = torch.tensor(feature.p_mask , dtype=torch.float )
A__ = torch.tensor(feature.is_impossible , dtype=torch.float )
A__ = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
A__ = torch.tensor(feature.start_position , dtype=torch.long )
A__ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 247 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE : str = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def UpperCamelCase ( _a , _a , _a ) -> Optional[Any]:
'''simple docstring'''
lowercase_ :List[Any] = state_dict.pop(_a )
lowercase_ :List[str] = val
def UpperCamelCase ( _a ) -> Any:
'''simple docstring'''
lowercase_ :Any = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowercase_ :Tuple = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
lowercase_ :int = value
else:
lowercase_ :Union[str, Any] = value
return new_state_dict
def UpperCamelCase ( _a , _a=False ) -> str:
'''simple docstring'''
lowercase_ :List[str] = ''''''
if is_panoptic:
lowercase_ :List[str] = '''conditional_detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowercase_ :List[str] = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
lowercase_ :Tuple = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
lowercase_ :int = in_proj_weight[:2_5_6, :]
lowercase_ :List[Any] = in_proj_bias[:2_5_6]
lowercase_ :List[str] = in_proj_weight[2_5_6:5_1_2, :]
lowercase_ :List[str] = in_proj_bias[2_5_6:5_1_2]
lowercase_ :Dict = in_proj_weight[-2_5_6:, :]
lowercase_ :Optional[Any] = in_proj_bias[-2_5_6:]
def UpperCamelCase ( ) -> int:
'''simple docstring'''
lowercase_ :str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase_ :List[str] = Image.open(requests.get(_a , stream=_a ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( _a , _a ) -> List[Any]:
'''simple docstring'''
lowercase_ :Union[str, Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
lowercase_ :List[str] = '''resnet101'''
if "dc5" in model_name:
lowercase_ :List[Any] = True
lowercase_ :int = '''panoptic''' in model_name
if is_panoptic:
lowercase_ :Optional[Any] = 2_5_0
else:
lowercase_ :List[str] = 9_1
lowercase_ :List[str] = '''huggingface/label-files'''
lowercase_ :List[Any] = '''coco-detection-id2label.json'''
lowercase_ :Optional[int] = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) )
lowercase_ :List[str] = {int(_a ): v for k, v in idalabel.items()}
lowercase_ :int = idalabel
lowercase_ :List[str] = {v: k for k, v in idalabel.items()}
# load image processor
lowercase_ :int = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
lowercase_ :Tuple = ConditionalDetrImageProcessor(format=_a )
# prepare image
lowercase_ :Any = prepare_img()
lowercase_ :List[str] = image_processor(images=_a , return_tensors='''pt''' )
lowercase_ :List[Any] = encoding['''pixel_values''']
logger.info(f"Converting model {model_name}..." )
# load original model from torch hub
lowercase_ :int = torch.hub.load('''DeppMeng/ConditionalDETR''' , _a , pretrained=_a ).eval()
lowercase_ :Optional[int] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
lowercase_ :List[str] = '''conditional_detr.''' + src
rename_key(_a , _a , _a )
lowercase_ :Dict = rename_backbone_keys(_a )
# query, key and value matrices need special treatment
read_in_q_k_v(_a , is_panoptic=_a )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowercase_ :Optional[Any] = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
lowercase_ :Any = state_dict.pop(_a )
lowercase_ :Tuple = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowercase_ :Optional[int] = state_dict.pop(_a )
lowercase_ :Union[str, Any] = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
lowercase_ :Any = state_dict.pop(_a )
lowercase_ :Tuple = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
lowercase_ :Dict = state_dict.pop(_a )
lowercase_ :Any = val
# finally, create HuggingFace model and load state dict
lowercase_ :Optional[Any] = ConditionalDetrForSegmentation(_a ) if is_panoptic else ConditionalDetrForObjectDetection(_a )
model.load_state_dict(_a )
model.eval()
model.push_to_hub(repo_id=_a , organization='''DepuMeng''' , commit_message='''Add model''' )
# verify our conversion
lowercase_ :int = conditional_detr(_a )
lowercase_ :Dict = model(_a )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 )
# Save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(_a ).mkdir(exist_ok=_a )
model.save_pretrained(_a )
image_processor.save_pretrained(_a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 252 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
lowercase : Union[str, Any] ="""naver-clova-ix/donut-base-finetuned-docvqa"""
lowercase : Optional[int] =(
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
lowercase : Dict ="""document_qa"""
lowercase : List[Any] =AutoProcessor
lowercase : Tuple =VisionEncoderDecoderModel
lowercase : Optional[int] =["""image""", """text"""]
lowercase : Any =["""text"""]
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
if not is_vision_available():
raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ :str = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
lowercase_ :Optional[int] = task_prompt.replace('''{user_input}''' , UpperCamelCase_ )
lowercase_ :List[Any] = self.pre_processor.tokenizer(
UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors='''pt''' ).input_ids
lowercase_ :Dict = self.pre_processor(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def UpperCamelCase ( self , UpperCamelCase_ ):
return self.model.generate(
inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=UpperCamelCase_ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=UpperCamelCase_ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=UpperCamelCase_ , ).sequences
def UpperCamelCase ( self , UpperCamelCase_ ):
lowercase_ :Any = self.pre_processor.batch_decode(UpperCamelCase_ )[0]
lowercase_ :Any = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
lowercase_ :int = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
lowercase_ :Dict = re.sub(R'''<.*?>''' , '''''' , UpperCamelCase_ , count=1 ).strip() # remove first task start token
lowercase_ :Dict = self.pre_processor.tokenajson(UpperCamelCase_ )
return sequence["answer"]
| 252 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def UpperCamelCase ( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Any=None , __magic_name__ : Union[str, Any]=None ) -> Union[str, Any]:
"""simple docstring"""
if attention_mask is None:
lowercase__ = tf.cast(tf.math.not_equal(__magic_name__ , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class A :
'''simple docstring'''
A__ = OPTConfig
A__ = {}
A__ = '''gelu'''
def __init__(self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any=13 , _UpperCAmelCase : Union[str, Any]=7 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : List[Any]=99 , _UpperCAmelCase : str=16 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Tuple=4 , _UpperCAmelCase : Tuple=4 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Dict=20 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Any=1 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : Any=16 , _UpperCAmelCase : Tuple=16 , ) -> int:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = eos_token_id
lowercase__ = pad_token_id
lowercase__ = bos_token_id
lowercase__ = embed_dim
lowercase__ = word_embed_proj_dim
lowercase__ = False
def lowerCamelCase__ (self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowercase__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowercase__ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowercase__ = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=_UpperCAmelCase , **self.config_updates , )
lowercase__ = prepare_opt_inputs_dict(_UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFOPTModel(config=_UpperCAmelCase )
lowercase__ = inputs_dict["""input_ids"""]
lowercase__ = input_ids[:1, :]
lowercase__ = inputs_dict["""attention_mask"""][:1, :]
lowercase__ = 1
# first forward pass
lowercase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
lowercase__ , lowercase__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowercase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowercase__ = tf.concat([input_ids, next_tokens] , axis=-1 )
lowercase__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowercase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
lowercase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowercase__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowercase__ = output_from_no_past[:, -3:, random_slice_idx]
lowercase__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-3 )
@require_tf
class A ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
A__ = (TFOPTForCausalLM,) if is_tf_available() else ()
A__ = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
A__ = False
A__ = False
A__ = False
A__ = 10
def lowerCamelCase__ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ = TFOPTModelTester(self )
lowercase__ = ConfigTester(self , config_class=_UpperCAmelCase )
def lowerCamelCase__ (self : str ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase__ (self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
def lowerCamelCase__ (self : List[str] ) -> str:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(_UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ):
if hasattr(_UpperCAmelCase , """weight""" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(_UpperCAmelCase , """weight""" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
lowercase__ = model_class(config=_UpperCAmelCase )
lowercase__ = _get_word_embedding_weight(_UpperCAmelCase , model.get_input_embeddings() )
lowercase__ = _get_word_embedding_weight(_UpperCAmelCase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(_UpperCAmelCase )
lowercase__ = _get_word_embedding_weight(_UpperCAmelCase , model.get_input_embeddings() )
lowercase__ = _get_word_embedding_weight(_UpperCAmelCase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowercase__ = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , _UpperCAmelCase )
# check that weights remain the same after resizing
lowercase__ = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowercase__ = False
self.assertTrue(_UpperCAmelCase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , _UpperCAmelCase )
lowercase__ = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowercase__ = False
self.assertTrue(_UpperCAmelCase )
def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
return tf.constant(__magic_name__ , dtype=tf.intaa )
@require_tf
class A ( unittest.TestCase ):
'''simple docstring'''
A__ = 99
def lowerCamelCase__ (self : Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ = tf.ones((4, 1) , dtype=tf.intaa ) * 2
lowercase__ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
lowercase__ = input_ids.shape[0]
lowercase__ = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class A ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCamelCase__ (self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
lowercase__ = _long_tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
lowercase__ = tf.not_equal(_UpperCAmelCase , model.config.pad_token_id )
with tf.GradientTape():
lowercase__ = model(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase ).last_hidden_state
lowercase__ = (1, 11, 512)
self.assertEqual(output.shape , _UpperCAmelCase )
lowercase__ = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=4E-3 ) )
lowercase__ = tf.function(_UpperCAmelCase , jit_compile=_UpperCAmelCase )
lowercase__ = xla_generate(_UpperCAmelCase , _UpperCAmelCase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=4E-2 ) )
@require_tf
@slow
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : Union[str, Any] ) -> int:
"""simple docstring"""
super().setUp()
lowercase__ = """facebook/opt-350m"""
def lowerCamelCase__ (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFOPTForCausalLM.from_pretrained(self.path_model )
lowercase__ = GPTaTokenizer.from_pretrained(self.path_model )
lowercase__ = [
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowercase__ = tokenizer(_UpperCAmelCase , return_tensors="""tf""" , padding=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
lowercase__ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowercase__ = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-4 ) )
lowercase__ = tf.function(_UpperCAmelCase , jit_compile=_UpperCAmelCase )
lowercase__ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-4 ) )
@require_tf
@slow
class A ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCamelCase__ (self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowerCamelCase__ (self : List[Any] ) -> int:
"""simple docstring"""
lowercase__ = """facebook/opt-125m"""
lowercase__ = [
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
lowercase__ = []
lowercase__ = GPTaTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = TFOPTForCausalLM.from_pretrained(_UpperCAmelCase )
for prompt in self.prompts:
lowercase__ = tokenizer(_UpperCAmelCase , return_tensors="""tf""" ).input_ids
lowercase__ = model.generate(_UpperCAmelCase , max_length=10 )
lowercase__ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = """facebook/opt-350m"""
lowercase__ = GPTaTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = TFOPTForCausalLM.from_pretrained(_UpperCAmelCase )
lowercase__ = """left"""
# use different length sentences to test batching
lowercase__ = [
"""Hello, my dog is a little""",
"""Today, I""",
]
lowercase__ = tokenizer(_UpperCAmelCase , return_tensors="""tf""" , padding=_UpperCAmelCase )
lowercase__ = inputs["""input_ids"""]
lowercase__ = model.generate(input_ids=_UpperCAmelCase , attention_mask=inputs["""attention_mask"""] )
lowercase__ = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
lowercase__ = model.generate(input_ids=_UpperCAmelCase )
lowercase__ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["""attention_mask"""][-1] , tf.intaa ) )
lowercase__ = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
lowercase__ = model.generate(input_ids=_UpperCAmelCase , max_length=model.config.max_length - num_paddings )
lowercase__ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
lowercase__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_UpperCAmelCase )
lowercase__ = tokenizer.decode(output_padded[0] , skip_special_tokens=_UpperCAmelCase )
lowercase__ = [
"""Hello, my dog is a little bit of a dork.\nI'm a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , [non_padded_sentence, padded_sentence] )
def lowerCamelCase__ (self : Dict ) -> str:
"""simple docstring"""
lowercase__ = """facebook/opt-350m"""
lowercase__ = [
"""Today is a beautiful day and I want to""",
"""In the city of San Francisco, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
lowercase__ = []
lowercase__ = GPTaTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = TFOPTForCausalLM.from_pretrained(_UpperCAmelCase )
for prompt in self.prompts:
lowercase__ = tokenizer(_UpperCAmelCase , return_tensors="""tf""" ).input_ids
lowercase__ = model.generate(_UpperCAmelCase , max_length=10 )
lowercase__ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
| 305 |
from __future__ import annotations
def UpperCamelCase ( __magic_name__ : list[float] , __magic_name__ : list[float] ) -> float:
"""simple docstring"""
lowercase__ = sorted(numsa + numsa )
lowercase__ , lowercase__ = divmod(len(__magic_name__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
A : Any = [float(x) for x in input('Enter the elements of first array: ').split()]
A : Union[str, Any] = [float(x) for x in input('Enter the elements of second array: ').split()]
print(F'The median of two arrays is: {median_of_two_arrays(array_a, array_a)}')
| 305 | 1 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
lowerCamelCase : str = parser.parse_args()
lowerCamelCase : Union[str, Any] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
lowerCamelCase : List[str] = CLIPImageProcessor()
lowerCamelCase : Union[str, Any] = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
lowerCamelCase : int = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 208 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase : int = logging.getLogger()
lowerCamelCase : Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : List[str] , A_ : Optional[Any] ) -> int:
"""simple docstring"""
os.makedirs(A_ , exist_ok=A_ )
lowerCamelCase_ = {'source': 'What is love ?', 'target': 'life'}
lowerCamelCase_ = {'train': 12, 'val': 2, 'test': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCamelCase_ = '\n'.join([contents[field]] * n_lines[split] )
with open(os.path.join(A_ , f"""{split}.{field}""" ) , 'w' ) as f:
f.write(A_ )
def a__ ( self : Optional[Any] , A_ : int , A_ : str = "pytorch" ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = os.path.join(A_ , 'output' )
lowerCamelCase_ = os.path.join(A_ , 'data' )
self._create_dummy_data(data_dir=A_ )
lowerCamelCase_ = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
lowerCamelCase_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(A_ , env=self.get_env() )
lowerCamelCase_ = os.path.join(A_ , 'metrics.json' )
with open(A_ ) as f:
lowerCamelCase_ = json.load(A_ )
return result
@require_torch_gpu
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
def a__ ( self : Dict ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
| 208 | 1 |
"""simple docstring"""
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 165 | """simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class snake_case__ ( snake_case_ ):
_snake_case : "DiagonalGaussianDistribution"
class snake_case__ ( snake_case_, snake_case_ ):
_snake_case : Optional[Any] = True
@register_to_config
def __init__( self , lowerCamelCase = 3 , lowerCamelCase = 3 , lowerCamelCase = ("DownEncoderBlock2D",) , lowerCamelCase = ("UpDecoderBlock2D",) , lowerCamelCase = (64,) , lowerCamelCase = 1 , lowerCamelCase = "silu" , lowerCamelCase = 4 , lowerCamelCase = 32 , lowerCamelCase = 32 , lowerCamelCase = 0.1_8215 , ):
super().__init__()
# pass init params to Encoder
__a = Encoder(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , down_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , act_fn=lowerCamelCase , norm_num_groups=lowerCamelCase , double_z=lowerCamelCase , )
# pass init params to Decoder
__a = Decoder(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , up_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , norm_num_groups=lowerCamelCase , act_fn=lowerCamelCase , )
__a = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
__a = nn.Convad(lowerCamelCase , lowerCamelCase , 1 )
__a = False
__a = False
# only relevant if vae tiling is enabled
__a = self.config.sample_size
__a = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__a = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__a = 0.25
def a__ ( self , lowerCamelCase , lowerCamelCase=False ):
if isinstance(lowerCamelCase , (Encoder, Decoder) ):
__a = value
def a__ ( self , lowerCamelCase = True ):
__a = use_tiling
def a__ ( self ):
self.enable_tiling(lowerCamelCase )
def a__ ( self ):
__a = True
def a__ ( self ):
__a = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ):
__a = {}
def fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if hasattr(lowerCamelCase , "set_processor" ):
__a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return processors
def a__ ( self , lowerCamelCase ):
__a = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(lowerCamelCase )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if hasattr(lowerCamelCase , "set_processor" ):
if not isinstance(lowerCamelCase , lowerCamelCase ):
module.set_processor(lowerCamelCase )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(lowerCamelCase , return_dict=lowerCamelCase )
if self.use_slicing and x.shape[0] > 1:
__a = [self.encoder(lowerCamelCase ) for x_slice in x.split(1 )]
__a = torch.cat(lowerCamelCase )
else:
__a = self.encoder(lowerCamelCase )
__a = self.quant_conv(lowerCamelCase )
__a = DiagonalGaussianDistribution(lowerCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(lowerCamelCase , return_dict=lowerCamelCase )
__a = self.post_quant_conv(lowerCamelCase )
__a = self.decoder(lowerCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
@apply_forward_hook
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
if self.use_slicing and z.shape[0] > 1:
__a = [self._decode(lowerCamelCase ).sample for z_slice in z.split(1 )]
__a = torch.cat(lowerCamelCase )
else:
__a = self._decode(lowerCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = min(a.shape[2] , b.shape[2] , lowerCamelCase )
for y in range(lowerCamelCase ):
__a = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = min(a.shape[3] , b.shape[3] , lowerCamelCase )
for x in range(lowerCamelCase ):
__a = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
__a = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__a = int(self.tile_latent_min_size * self.tile_overlap_factor )
__a = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__a = []
for i in range(0 , x.shape[2] , lowerCamelCase ):
__a = []
for j in range(0 , x.shape[3] , lowerCamelCase ):
__a = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__a = self.encoder(lowerCamelCase )
__a = self.quant_conv(lowerCamelCase )
row.append(lowerCamelCase )
rows.append(lowerCamelCase )
__a = []
for i, row in enumerate(lowerCamelCase ):
__a = []
for j, tile in enumerate(lowerCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase )
if j > 0:
__a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCamelCase , dim=3 ) )
__a = torch.cat(lowerCamelCase , dim=2 )
__a = DiagonalGaussianDistribution(lowerCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
__a = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__a = int(self.tile_sample_min_size * self.tile_overlap_factor )
__a = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__a = []
for i in range(0 , z.shape[2] , lowerCamelCase ):
__a = []
for j in range(0 , z.shape[3] , lowerCamelCase ):
__a = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__a = self.post_quant_conv(lowerCamelCase )
__a = self.decoder(lowerCamelCase )
row.append(lowerCamelCase )
rows.append(lowerCamelCase )
__a = []
for i, row in enumerate(lowerCamelCase ):
__a = []
for j, tile in enumerate(lowerCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase )
if j > 0:
__a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCamelCase , dim=3 ) )
__a = torch.cat(lowerCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = True , lowerCamelCase = None , ):
__a = sample
__a = self.encode(lowerCamelCase ).latent_dist
if sample_posterior:
__a = posterior.sample(generator=lowerCamelCase )
else:
__a = posterior.mode()
__a = self.decode(lowerCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
| 261 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase__ ( lowerCamelCase : Union[str, Any] ,lowerCamelCase : str ,lowerCamelCase : Optional[int] ):
# Initialise PyTorch model
_A : str = RemBertConfig.from_json_file(lowerCamelCase )
print('Building PyTorch model from configuration: {}'.format(str(lowerCamelCase ) ) )
_A : Union[str, Any] = RemBertModel(lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
# Save pytorch-model
print('Save PyTorch model to {}'.format(lowerCamelCase ) )
torch.save(model.state_dict() ,lowerCamelCase )
if __name__ == "__main__":
A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A : Union[str, Any] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 227 |
'''simple docstring'''
import operator as op
A : List[str] = '''scaler.pt'''
A : Optional[int] = '''pytorch_model'''
A : Union[str, Any] = '''random_states'''
A : List[Any] = '''optimizer'''
A : Optional[Any] = '''scheduler'''
A : Optional[int] = '''pytorch_model.bin'''
A : List[str] = '''pytorch_model.bin.index.json'''
A : List[Any] = '''model.safetensors'''
A : int = '''model.safetensors.index.json'''
A : List[str] = '''1.10.2'''
A : str = '''py38'''
A : int = '''4.17.0'''
A : Tuple = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
A : Union[str, Any] = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
A : Optional[Any] = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
A : Dict = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
A : Any = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
A : Tuple = '''2.0.1'''
A : List[str] = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
A : Union[str, Any] = ['''default''', '''reduce-overhead''', '''max-autotune''']
A : List[Any] = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
A : Tuple = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
A : List[str] = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
A : Any = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 227 | 1 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
lowercase_ = data_utils.TransfoXLTokenizer
lowercase_ = data_utils.TransfoXLCorpus
lowercase_ = data_utils
lowercase_ = data_utils
def a__ ( snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : int = pickle.load(snake_case , encoding='''latin1''' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
__SCREAMING_SNAKE_CASE : str = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' )
__SCREAMING_SNAKE_CASE : List[str] = corpus.vocab.__dict__
torch.save(snake_case , snake_case )
__SCREAMING_SNAKE_CASE : int = corpus.__dict__
corpus_dict_no_vocab.pop('''vocab''' , snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pytorch_dump_folder_path + "/" + CORPUS_NAME
print(F'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(snake_case , snake_case )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
__SCREAMING_SNAKE_CASE : Any = os.path.abspath(snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.abspath(snake_case )
print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
__SCREAMING_SNAKE_CASE : int = TransfoXLConfig()
else:
__SCREAMING_SNAKE_CASE : Optional[int] = TransfoXLConfig.from_json_file(snake_case )
print(F'''Building PyTorch model from configuration: {config}''' )
__SCREAMING_SNAKE_CASE : Tuple = TransfoXLLMHeadModel(snake_case )
__SCREAMING_SNAKE_CASE : List[str] = load_tf_weights_in_transfo_xl(snake_case , snake_case , snake_case )
# Save pytorch-model
__SCREAMING_SNAKE_CASE : List[str] = os.path.join(snake_case , snake_case )
__SCREAMING_SNAKE_CASE : int = os.path.join(snake_case , snake_case )
print(F'''Save PyTorch model to {os.path.abspath(snake_case )}''' )
torch.save(model.state_dict() , snake_case )
print(F'''Save configuration file to {os.path.abspath(snake_case )}''' )
with open(snake_case , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
lowercase_ = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 303 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=lowerCAmelCase )
env_command_parser(subparsers=lowerCAmelCase )
launch_command_parser(subparsers=lowerCAmelCase )
tpu_command_parser(subparsers=lowerCAmelCase )
test_command_parser(subparsers=lowerCAmelCase )
# Let's go
SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args()
if not hasattr(lowerCAmelCase , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(lowerCAmelCase )
if __name__ == "__main__":
main()
| 18 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __A , __A=7 , __A=3 , __A=18 , __A=30 , __A=400 , __A=True , __A=None , __A=True , __A=None , __A=True , __A=[0.5, 0.5, 0.5] , __A=[0.5, 0.5, 0.5] , __A=False , ) -> Dict:
a =size if size is not None else {'''height''': 20, '''width''': 20}
a =crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
a =parent
a =batch_size
a =num_channels
a =image_size
a =min_resolution
a =max_resolution
a =do_resize
a =size
a =do_center_crop
a =crop_size
a =do_normalize
a =image_mean
a =image_std
a =do_reduce_labels
def SCREAMING_SNAKE_CASE ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def _A ( ):
"""simple docstring"""
a =load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
a =Image.open(dataset[0]['''file'''] )
a =Image.open(dataset[1]['''file'''] )
return image, map
def _A ( ):
"""simple docstring"""
a =load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
a =Image.open(ds[0]['''file'''] )
a =Image.open(ds[1]['''file'''] )
a =Image.open(ds[2]['''file'''] )
a =Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class __A ( _SCREAMING_SNAKE_CASE, unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = BeitImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a =BeitImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self ) -> Any:
a =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , '''do_resize''' ) )
self.assertTrue(hasattr(__A , '''size''' ) )
self.assertTrue(hasattr(__A , '''do_center_crop''' ) )
self.assertTrue(hasattr(__A , '''center_crop''' ) )
self.assertTrue(hasattr(__A , '''do_normalize''' ) )
self.assertTrue(hasattr(__A , '''image_mean''' ) )
self.assertTrue(hasattr(__A , '''image_std''' ) )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , __A )
a =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__A )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , __A )
def SCREAMING_SNAKE_CASE ( self ) -> int:
pass
def SCREAMING_SNAKE_CASE ( self ) -> str:
# Initialize image_processing
a =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
a =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
a =image_processing(__A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
# Initialize image_processing
a =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
a =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
a =image_processing(__A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processing
a =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
a =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
a =image_processing(__A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE ( self ) -> int:
# Initialize image_processing
a =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
a =[]
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
a =image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
a =image_processing(__A , __A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
a , a =prepare_semantic_single_inputs()
a =image_processing(__A , __A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
a , a =prepare_semantic_batch_inputs()
a =image_processing(__A , __A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def SCREAMING_SNAKE_CASE ( self ) -> str:
# Initialize image_processing
a =self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
a , a =prepare_semantic_single_inputs()
a =image_processing(__A , __A , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
a =True
a =image_processing(__A , __A , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 ) | 215 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = "WhisperFeatureExtractor"
__lowerCAmelCase = "WhisperTokenizer"
def __init__( self , __A , __A ) -> Dict:
super().__init__(__A , __A )
a =self.feature_extractor
a =False
def SCREAMING_SNAKE_CASE ( self , __A=None , __A=None , __A=True ) -> int:
return self.tokenizer.get_decoder_prompt_ids(task=__A , language=__A , no_timestamps=__A )
def __call__( self , *__A , **__A ) -> Tuple:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
a =kwargs.pop('''audio''' , __A )
a =kwargs.pop('''sampling_rate''' , __A )
a =kwargs.pop('''text''' , __A )
if len(__A ) > 0:
a =args[0]
a =args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
a =self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if text is not None:
a =self.tokenizer(__A , **__A )
if text is None:
return inputs
elif audio is None:
return encodings
else:
a =encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE ( self , *__A , **__A ) -> Optional[Any]:
return self.tokenizer.batch_decode(*__A , **__A )
def SCREAMING_SNAKE_CASE ( self , *__A , **__A ) -> Union[str, Any]:
return self.tokenizer.decode(*__A , **__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A="np" ) -> Optional[Any]:
return self.tokenizer.get_prompt_ids(__A , return_tensors=__A ) | 215 | 1 |
'''simple docstring'''
def snake_case_ ( lowerCAmelCase_ )-> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = len(lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = sum(lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
_UpperCAmelCase : Optional[int] = True
for i in range(1 , s + 1 ):
_UpperCAmelCase : Tuple = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
_UpperCAmelCase : Dict = dp[i][j - 1]
if arr[i - 1] <= j:
_UpperCAmelCase : Optional[Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
_UpperCAmelCase : Optional[Any] = s - 2 * j
break
return diff
| 215 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = (IPNDMScheduler,)
UpperCAmelCase = (("""num_inference_steps""", 50),)
def _snake_case ( self ,**a_ ) -> Optional[int]:
_UpperCAmelCase : str = {"""num_train_timesteps""": 1_000}
config.update(**a_ )
return config
def _snake_case ( self ,a_=0 ,**a_ ) -> List[str]:
_UpperCAmelCase : Any = dict(self.forward_default_kwargs )
_UpperCAmelCase : Any = kwargs.pop("""num_inference_steps""" ,a_ )
_UpperCAmelCase : Union[str, Any] = self.dummy_sample
_UpperCAmelCase : Union[str, Any] = 0.1 * sample
_UpperCAmelCase : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : str = self.get_scheduler_config(**a_ )
_UpperCAmelCase : List[str] = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_UpperCAmelCase : str = dummy_past_residuals[:]
if time_step is None:
_UpperCAmelCase : List[str] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_UpperCAmelCase : List[str] = scheduler_class.from_pretrained(a_ )
new_scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_UpperCAmelCase : Tuple = dummy_past_residuals[:]
_UpperCAmelCase : Dict = scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
_UpperCAmelCase : Any = new_scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_UpperCAmelCase : str = scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
_UpperCAmelCase : Any = new_scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self ) -> Any:
pass
def _snake_case ( self ,a_=0 ,**a_ ) -> Dict:
_UpperCAmelCase : str = dict(self.forward_default_kwargs )
_UpperCAmelCase : Union[str, Any] = kwargs.pop("""num_inference_steps""" ,a_ )
_UpperCAmelCase : Optional[int] = self.dummy_sample
_UpperCAmelCase : Tuple = 0.1 * sample
_UpperCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : Optional[Any] = self.get_scheduler_config()
_UpperCAmelCase : Dict = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase : Any = dummy_past_residuals[:]
if time_step is None:
_UpperCAmelCase : Optional[int] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_UpperCAmelCase : str = scheduler_class.from_pretrained(a_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a_ )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase : Dict = dummy_past_residuals[:]
_UpperCAmelCase : Tuple = scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
_UpperCAmelCase : str = new_scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_UpperCAmelCase : str = scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
_UpperCAmelCase : str = new_scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self ,**a_ ) -> List[Any]:
_UpperCAmelCase : int = self.scheduler_classes[0]
_UpperCAmelCase : List[str] = self.get_scheduler_config(**a_ )
_UpperCAmelCase : List[Any] = scheduler_class(**a_ )
_UpperCAmelCase : List[Any] = 10
_UpperCAmelCase : Dict = self.dummy_model()
_UpperCAmelCase : Dict = self.dummy_sample_deter
scheduler.set_timesteps(a_ )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase : Optional[Any] = model(a_ ,a_ )
_UpperCAmelCase : Tuple = scheduler.step(a_ ,a_ ,a_ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase : List[str] = model(a_ ,a_ )
_UpperCAmelCase : int = scheduler.step(a_ ,a_ ,a_ ).prev_sample
return sample
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : int = dict(self.forward_default_kwargs )
_UpperCAmelCase : Optional[Any] = kwargs.pop("""num_inference_steps""" ,a_ )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : int = self.get_scheduler_config()
_UpperCAmelCase : List[str] = scheduler_class(**a_ )
_UpperCAmelCase : Optional[int] = self.dummy_sample
_UpperCAmelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(a_ ,"""set_timesteps""" ):
scheduler.set_timesteps(a_ )
elif num_inference_steps is not None and not hasattr(a_ ,"""set_timesteps""" ):
_UpperCAmelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_UpperCAmelCase : Optional[int] = dummy_past_residuals[:]
_UpperCAmelCase : Tuple = scheduler.timesteps[5]
_UpperCAmelCase : Optional[Any] = scheduler.timesteps[6]
_UpperCAmelCase : Optional[Any] = scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
_UpperCAmelCase : Optional[int] = scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
_UpperCAmelCase : Dict = scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
_UpperCAmelCase : Any = scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def _snake_case ( self ) -> List[Any]:
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=a_ ,time_step=a_ )
def _snake_case ( self ) -> int:
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=a_ ,time_step=a_ )
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Union[str, Any] = self.full_loop()
_UpperCAmelCase : Any = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
| 215 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
A : Dict = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
A : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 360 |
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
A : Union[str, Any] = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
A : str = parser.parse_args()
if args.model_type == "roberta":
A : Union[str, Any] = RobertaForMaskedLM.from_pretrained(args.model_name)
A : Any = 'roberta'
elif args.model_type == "gpt2":
A : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name)
A : List[str] = 'transformer'
A : Dict = model.state_dict()
A : Any = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
A : Union[str, Any] = state_dict[F"{prefix}.{param_name}"]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
A : Any = F"{prefix}.embeddings.{w}.weight"
A : Union[str, Any] = state_dict[param_name]
for w in ["weight", "bias"]:
A : List[Any] = F"{prefix}.embeddings.LayerNorm.{w}"
A : List[str] = state_dict[param_name]
# Transformer Blocks #
A : Optional[int] = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
A : Any = state_dict[
F"{prefix}.h.{teacher_idx}.{layer}.{w}"
]
A : List[str] = state_dict[F"{prefix}.h.{teacher_idx}.attn.bias"]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
A : List[Any] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
A : Optional[int] = state_dict[F"{layer}"]
if args.vocab_transform:
for w in ["weight", "bias"]:
A : List[Any] = state_dict[F"lm_head.dense.{w}"]
A : List[str] = state_dict[F"lm_head.layer_norm.{w}"]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
A : List[str] = state_dict[F"{prefix}.ln_f.{w}"]
A : Dict = state_dict['lm_head.weight']
print(F"N layers selected for distillation: {std_idx}")
print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint) | 33 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Optional[int] = '''dpr'''
def __init__( self , lowerCAmelCase__=3_0_5_2_2 , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=1_2 , lowerCAmelCase__=1_2 , lowerCAmelCase__=3_0_7_2 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0 , lowerCAmelCase__="absolute" , lowerCAmelCase__ = 0 , **lowerCAmelCase__ , ):
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = projection_dim
__SCREAMING_SNAKE_CASE = position_embedding_type
| 100 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = GPTaTokenizer
UpperCAmelCase__ : str = GPTaTokenizerFast
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = {"add_prefix_space": True}
UpperCAmelCase__ : int = False
def __lowercase ( self ) -> int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a : str = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
_a : List[Any] = dict(zip(_a , range(len(_a ) ) ) )
_a : Tuple = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a : List[str] = {'''unk_token''': '''<unk>'''}
_a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def __lowercase ( self , **_a ) -> List[str]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , _a ) -> Optional[Any]:
_a : Tuple = '''lower newer'''
_a : Tuple = '''lower newer'''
return input_text, output_text
def __lowercase ( self ) -> Any:
_a : Any = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a : int = '''lower newer'''
_a : int = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_a : List[str] = tokenizer.tokenize(_a , add_prefix_space=_a )
self.assertListEqual(_a , _a )
_a : Optional[int] = tokens + [tokenizer.unk_token]
_a : List[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
def __lowercase ( self ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
_a : int = self.get_tokenizer()
_a : Tuple = self.get_rust_tokenizer(add_prefix_space=_a )
_a : Tuple = '''lower newer'''
# Testing tokenization
_a : List[str] = tokenizer.tokenize(_a , add_prefix_space=_a )
_a : Optional[Any] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
# Testing conversion to ids without special tokens
_a : List[Any] = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a )
_a : List[Any] = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
# Testing conversion to ids with special tokens
_a : List[str] = self.get_rust_tokenizer(add_prefix_space=_a )
_a : List[str] = tokenizer.encode(_a , add_prefix_space=_a )
_a : Tuple = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# Testing the unknown token
_a : Optional[Any] = tokens + [rust_tokenizer.unk_token]
_a : str = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_a ) , _a )
def __lowercase ( self , *_a , **_a ) -> int:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __lowercase ( self , _a=1_5 ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_a : Optional[int] = self.rust_tokenizer_class.from_pretrained(_a , **_a )
# Simple input
_a : List[str] = '''This is a simple input'''
_a : Optional[Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
_a : Tuple = ('''This is a simple input''', '''This is a pair''')
_a : Optional[Any] = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding='''max_length''' )
# Simple input
self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding='''max_length''' )
# Simple input
self.assertRaises(
_a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding='''max_length''' , )
# Pair input
self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding='''max_length''' )
# Pair input
self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding='''max_length''' )
# Pair input
self.assertRaises(
_a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding='''max_length''' , )
def __lowercase ( self ) -> List[Any]:
_a : int = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
_a : int = '''This is a simple input'''
_a : int = ['''This is a simple input looooooooong''', '''This is a simple input''']
_a : Any = ('''This is a simple input''', '''This is a pair''')
_a : Dict = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
_a : Optional[int] = tokenizer.pad_token_id
_a : List[Any] = tokenizer(_a , padding='''max_length''' , max_length=3_0 , return_tensors='''np''' )
_a : Optional[int] = tokenizer(_a , padding=_a , truncate=_a , return_tensors='''np''' )
_a : int = tokenizer(*_a , padding='''max_length''' , max_length=6_0 , return_tensors='''np''' )
_a : Union[str, Any] = tokenizer(_a , padding=_a , truncate=_a , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 3_0 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 6_0 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = '''$$$'''
_a : List[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=_a , add_bos_token=_a )
_a : Any = '''This is a simple input'''
_a : str = ['''This is a simple input 1''', '''This is a simple input 2''']
_a : Tuple = tokenizer.bos_token_id
_a : Optional[Any] = tokenizer(_a )
_a : str = tokenizer(_a )
self.assertEqual(out_s.input_ids[0] , _a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_a : str = tokenizer.decode(out_s.input_ids )
_a : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , _a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __lowercase ( self ) -> str:
pass
def __lowercase ( self ) -> Dict:
# TODO: change to self.get_tokenizers() when the fast version is implemented
_a : Optional[int] = [self.get_tokenizer(do_lower_case=_a , add_bos_token=_a )]
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a : Tuple = '''Encode this.'''
_a : Optional[Any] = '''This one too please.'''
_a : List[Any] = tokenizer.encode(_a , add_special_tokens=_a )
encoded_sequence += tokenizer.encode(_a , add_special_tokens=_a )
_a : List[str] = tokenizer.encode_plus(
_a , _a , add_special_tokens=_a , return_special_tokens_mask=_a , )
_a : int = encoded_sequence_dict['''input_ids''']
_a : int = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(_a ) , len(_a ) )
_a : List[Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(_a )
]
_a : str = [x for x in filtered_sequence if x is not None]
self.assertEqual(_a , _a )
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
_a : Any = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=_a )
_a : int = '''A photo of a cat'''
_a : List[Any] = tokenizer.encode(
_a , )
self.assertEqual(_a , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('''test_opt''' )
_a : Union[str, Any] = AutoTokenizer.from_pretrained('''./test_opt''' )
_a : Any = tokenizer.encode(
_a , )
self.assertEqual(_a , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def __lowercase ( self ) -> int:
_a : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=_a )
_a : Any = '''A photo of a cat'''
_a : Optional[int] = tokenizer.encode(
_a , )
# Same as above
self.assertEqual(_a , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=_a )
_a : Optional[Any] = '''bos'''
_a : Optional[Any] = tokenizer.get_vocab()['''bos''']
_a : str = '''A photo of a cat'''
_a : int = tokenizer.encode(
_a , )
# We changed the bos token
self.assertEqual(_a , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('''./tok''' )
_a : Dict = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
_a : Union[str, Any] = tokenizer.encode(
_a , )
self.assertEqual(_a , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
| 235 | 0 |
'''simple docstring'''
from __future__ import annotations
def _A (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Any:
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( _SCREAMING_SNAKE_CASE ):
def __init__( self , __magic_name__ , __magic_name__=13 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=99 , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_12 , __magic_name__=16 , __magic_name__=2 , __magic_name__=0.0_2 , __magic_name__=False , __magic_name__=True , __magic_name__="None" , __magic_name__=3 , __magic_name__=4 , __magic_name__=None , ) -> Any:
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = relative_attention
_a = position_biased_input
_a = pos_att_type
_a = scope
def __UpperCAmelCase ( self ) -> List[str]:
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ) -> Union[str, Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = self.get_config()
_a = 3_00
return config
def __UpperCAmelCase ( self , __magic_name__ ) -> Dict:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
_a = DebertaModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )[0]
_a = model(__magic_name__ , token_type_ids=__magic_name__ )[0]
_a = model(__magic_name__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
_a = DebertaForMaskedLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
_a = self.num_labels
_a = DebertaForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
_a = self.num_labels
_a = DebertaForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
_a = DebertaForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self ) -> Any:
_a = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = config_and_inputs
_a = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowerCAmelCase = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowerCAmelCase = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __UpperCAmelCase ( self ) -> List[str]:
_a = DebertaModelTester(self )
_a = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def __UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ) -> Any:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__magic_name__ )
def __UpperCAmelCase ( self ) -> str:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__magic_name__ )
def __UpperCAmelCase ( self ) -> Dict:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__magic_name__ )
def __UpperCAmelCase ( self ) -> Optional[int]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__magic_name__ )
def __UpperCAmelCase ( self ) -> Any:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__magic_name__ )
@slow
def __UpperCAmelCase ( self ) -> Optional[Any]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = DebertaModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def __UpperCAmelCase ( self ) -> Dict:
pass
@slow
def __UpperCAmelCase ( self ) -> int:
_a = DebertaModel.from_pretrained('microsoft/deberta-base' )
_a = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
_a = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_a = model(__magic_name__ , attention_mask=__magic_name__ )[0]
# compare the actual values for a slice.
_a = torch.tensor(
[[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __magic_name__ , atol=1e-4 ) , f'{output[:, 1:4, 1:4]}' )
| 104 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( A : str ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = [0] * len(A )
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Optional[int] = [1] * len(A )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(A ) ):
if indegree[i] == 0:
queue.append(A )
while queue:
UpperCAmelCase_ : Any = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
UpperCAmelCase_ : Union[str, Any] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(A )
print(max(A ) )
# Adjacency list of Graph
_UpperCamelCase : Optional[int] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 304 |
'''simple docstring'''
import functools
def __UpperCAmelCase ( A : str , A : str ) -> int:
UpperCAmelCase_ : Optional[Any] = len(A )
UpperCAmelCase_ : List[str] = len(A )
@functools.cache
def min_distance(A : int , A : int ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
UpperCAmelCase_ : Any = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , A ) , 1 + min_distance(A , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 304 | 1 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _snake_case ( lowercase__):
UpperCamelCase__ : str =["""image_processor""", """tokenizer"""]
UpperCamelCase__ : Union[str, Any] ="""OwlViTImageProcessor"""
UpperCamelCase__ : Union[str, Any] =("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : str, __lowercase : int=None, __lowercase : str=None, **__lowercase : str ):
lowercase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.", __lowercase, )
lowercase__ = kwargs.pop("feature_extractor" )
lowercase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__lowercase, __lowercase )
def __call__( self : Any, __lowercase : Optional[Any]=None, __lowercase : List[Any]=None, __lowercase : Optional[int]=None, __lowercase : Dict="max_length", __lowercase : Any="np", **__lowercase : Optional[Any] ):
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(__lowercase, __lowercase ) or (isinstance(__lowercase, __lowercase ) and not isinstance(text[0], __lowercase )):
lowercase__ = [self.tokenizer(__lowercase, padding=__lowercase, return_tensors=__lowercase, **__lowercase )]
elif isinstance(__lowercase, __lowercase ) and isinstance(text[0], __lowercase ):
lowercase__ = []
# Maximum number of queries across batch
lowercase__ = max([len(__lowercase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__lowercase ) != max_num_queries:
lowercase__ = t + [" "] * (max_num_queries - len(__lowercase ))
lowercase__ = self.tokenizer(__lowercase, padding=__lowercase, return_tensors=__lowercase, **__lowercase )
encodings.append(__lowercase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
lowercase__ = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0 )
lowercase__ = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowercase__ = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0 )
lowercase__ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowercase__ = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0 )
lowercase__ = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowercase__ = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0 )
lowercase__ = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
lowercase__ = BatchEncoding()
lowercase__ = input_ids
lowercase__ = attention_mask
if query_images is not None:
lowercase__ = BatchEncoding()
lowercase__ = self.image_processor(
__lowercase, return_tensors=__lowercase, **__lowercase ).pixel_values
lowercase__ = query_pixel_values
if images is not None:
lowercase__ = self.image_processor(__lowercase, return_tensors=__lowercase, **__lowercase )
if text is not None and images is not None:
lowercase__ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowercase__ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowercase ), tensor_type=__lowercase )
def A__ ( self : Any, *__lowercase : List[str], **__lowercase : Optional[int] ):
return self.image_processor.post_process(*__lowercase, **__lowercase )
def A__ ( self : List[str], *__lowercase : List[Any], **__lowercase : List[str] ):
return self.image_processor.post_process_object_detection(*__lowercase, **__lowercase )
def A__ ( self : List[Any], *__lowercase : List[Any], **__lowercase : Union[str, Any] ):
return self.image_processor.post_process_image_guided_detection(*__lowercase, **__lowercase )
def A__ ( self : Dict, *__lowercase : Union[str, Any], **__lowercase : str ):
return self.tokenizer.batch_decode(*__lowercase, **__lowercase )
def A__ ( self : List[Any], *__lowercase : Optional[Any], **__lowercase : List[Any] ):
return self.tokenizer.decode(*__lowercase, **__lowercase )
@property
def A__ ( self : List[Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", __lowercase, )
return self.image_processor_class
@property
def A__ ( self : List[str] ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", __lowercase, )
return self.image_processor
| 351 |
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
lowercase__ = n - k
# Calculate C(n,k)
for i in range(SCREAMING_SNAKE_CASE_ ):
result *= n - i
result //= i + 1
return result
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
return binomial_coefficient(2 * node_count , SCREAMING_SNAKE_CASE_ ) // (node_count + 1)
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
if n < 0:
raise ValueError("factorial() not defined for negative values" )
lowercase__ = 1
for i in range(1 , n + 1 ):
result *= i
return result
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
return catalan_number(SCREAMING_SNAKE_CASE_ ) * factorial(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowercase_ = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
F'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
F'binary trees and {catalan_number(node_count)} binary search trees.'
)
| 224 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = ["sentencepiece"]
def __init__( self : Any , *A : Any , **A : Optional[Any] ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[Any] = ["sentencepiece"]
def __init__( self : Dict , *A : Optional[int] , **A : Union[str, Any] ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Union[str, Any] = ["sentencepiece"]
def __init__( self : List[Any] , *A : List[Any] , **A : List[Any] ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Tuple = ["sentencepiece"]
def __init__( self : Optional[int] , *A : Optional[Any] , **A : int ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Tuple = ["sentencepiece"]
def __init__( self : List[str] , *A : Tuple , **A : str ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Tuple = ["sentencepiece"]
def __init__( self : Optional[int] , *A : Union[str, Any] , **A : Optional[int] ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = ["sentencepiece"]
def __init__( self : Optional[Any] , *A : Union[str, Any] , **A : List[Any] ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = ["sentencepiece"]
def __init__( self : List[str] , *A : List[str] , **A : List[str] ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: str = ["sentencepiece"]
def __init__( self : Optional[int] , *A : List[str] , **A : str ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[Any] = ["sentencepiece"]
def __init__( self : List[str] , *A : Optional[int] , **A : str ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Tuple = ["sentencepiece"]
def __init__( self : Optional[Any] , *A : str , **A : Any ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Union[str, Any] = ["sentencepiece"]
def __init__( self : Tuple , *A : Tuple , **A : List[Any] ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: str = ["sentencepiece"]
def __init__( self : str , *A : Tuple , **A : int ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = ["sentencepiece"]
def __init__( self : Dict , *A : Union[str, Any] , **A : Union[str, Any] ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = ["sentencepiece"]
def __init__( self : str , *A : str , **A : Optional[Any] ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = ["sentencepiece"]
def __init__( self : Tuple , *A : Any , **A : int ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[Any] = ["sentencepiece"]
def __init__( self : Dict , *A : Dict , **A : Union[str, Any] ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[Any] = ["sentencepiece"]
def __init__( self : Dict , *A : List[Any] , **A : str ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = ["sentencepiece"]
def __init__( self : Tuple , *A : List[str] , **A : List[str] ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Tuple = ["sentencepiece"]
def __init__( self : Union[str, Any] , *A : Tuple , **A : Union[str, Any] ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: int = ["sentencepiece"]
def __init__( self : Any , *A : Optional[int] , **A : Tuple ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: str = ["sentencepiece"]
def __init__( self : Optional[int] , *A : int , **A : List[str] ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Union[str, Any] = ["sentencepiece"]
def __init__( self : Any , *A : Tuple , **A : int ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = ["sentencepiece"]
def __init__( self : Dict , *A : Dict , **A : Union[str, Any] ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Union[str, Any] = ["sentencepiece"]
def __init__( self : Dict , *A : Union[str, Any] , **A : Tuple ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: int = ["sentencepiece"]
def __init__( self : Any , *A : List[Any] , **A : str ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = ["sentencepiece"]
def __init__( self : Dict , *A : Tuple , **A : Optional[Any] ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[Any] = ["sentencepiece"]
def __init__( self : int , *A : Tuple , **A : Optional[int] ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: int = ["sentencepiece"]
def __init__( self : List[Any] , *A : Tuple , **A : Dict ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Tuple = ["sentencepiece"]
def __init__( self : List[Any] , *A : Optional[int] , **A : List[Any] ):
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase_ (metaclass=snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[Any] = ["sentencepiece"]
def __init__( self : List[Any] , *A : Any , **A : int ):
requires_backends(self , ["sentencepiece"] )
| 31 | '''simple docstring'''
def UpperCamelCase_ ( _UpperCAmelCase : list ) -> list:
"""simple docstring"""
_UpperCAmelCase : List[Any] = len(_UpperCAmelCase )
for _ in range(_UpperCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
_UpperCAmelCase , _UpperCAmelCase : int = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = list(range(10, 0, -1))
print(F'Original: {arr}. Sorted: {odd_even_transposition(arr)}')
| 31 | 1 |
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( a_ : Optional[int] , a_ : List[str] , a_ : str ) -> Union[str, Any]:
# Initialise PyTorch model
__SCREAMING_SNAKE_CASE :Dict = TaConfig.from_json_file(a_ )
print(f'''Building PyTorch model from configuration: {config}''' )
__SCREAMING_SNAKE_CASE :int = TaForConditionalGeneration(a_ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(a_ , a_ , a_ )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 239 |
"""simple docstring"""
def __lowerCamelCase ( a_ : Union[str, Any] , a_ : Optional[Any] ) -> Union[str, Any]:
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def __lowerCamelCase ( a_ : Optional[int] , a_ : Any=0 ) -> Optional[Any]:
return sorted(a_ , key=lambda a_ : x[column] )
def __lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[int] , a_ : str=float('''inf''' ) ) -> str:
for i in range(points_counts - 1 ):
for j in range(i + 1 , a_ ):
__SCREAMING_SNAKE_CASE :Dict = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__SCREAMING_SNAKE_CASE :Optional[Any] = current_dis
return min_dis
def __lowerCamelCase ( a_ : List[Any] , a_ : Any , a_ : Optional[int]=float('''inf''' ) ) -> Optional[Any]:
for i in range(min(6 , points_counts - 1 ) , a_ ):
for j in range(max(0 , i - 6 ) , a_ ):
__SCREAMING_SNAKE_CASE :Dict = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__SCREAMING_SNAKE_CASE :int = current_dis
return min_dis
def __lowerCamelCase ( a_ : str , a_ : List[Any] , a_ : int ) -> Optional[int]:
# base case
if points_counts <= 3:
return dis_between_closest_pair(a_ , a_ )
# recursion
__SCREAMING_SNAKE_CASE :int = points_counts // 2
__SCREAMING_SNAKE_CASE :Dict = closest_pair_of_points_sqr(
a_ , points_sorted_on_y[:mid] , a_ )
__SCREAMING_SNAKE_CASE :Any = closest_pair_of_points_sqr(
a_ , points_sorted_on_y[mid:] , points_counts - mid )
__SCREAMING_SNAKE_CASE :Union[str, Any] = min(a_ , a_ )
__SCREAMING_SNAKE_CASE :str = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(a_ )
__SCREAMING_SNAKE_CASE :Dict = dis_between_closest_in_strip(
a_ , len(a_ ) , a_ )
return min(a_ , a_ )
def __lowerCamelCase ( a_ : int , a_ : Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE :Union[str, Any] = column_based_sort(a_ , column=0 )
__SCREAMING_SNAKE_CASE :int = column_based_sort(a_ , column=1 )
return (
closest_pair_of_points_sqr(
a_ , a_ , a_ )
) ** 0.5
if __name__ == "__main__":
lowerCamelCase_ = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points))) | 239 | 1 |
"""simple docstring"""
def A ( snake_case__ ):
'''simple docstring'''
assert isinstance(snake_case__ , snake_case__ ), f"""The input value of [n={number}] is not an integer"""
if number == 1:
return 2
elif number < 1:
SCREAMING_SNAKE_CASE__ = f"""The input value of [n={number}] has to be > 0"""
raise ValueError(snake_case__ )
else:
SCREAMING_SNAKE_CASE__ = sylvester(number - 1 )
SCREAMING_SNAKE_CASE__ = num - 1
SCREAMING_SNAKE_CASE__ = num
return lower * upper + 1
if __name__ == "__main__":
print(F'The 8th number in Sylvester\'s sequence: {sylvester(8)}')
| 165 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
SCREAMING_SNAKE_CASE__ = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
model.to(__UpperCAmelCase )
from datasets import load_dataset
SCREAMING_SNAKE_CASE__ = load_dataset("""nielsr/rvlcdip-demo""" )
SCREAMING_SNAKE_CASE__ = dataset["""train"""][0]["""image"""].convert("""RGB""" )
SCREAMING_SNAKE_CASE__ = image_processor(__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = outputs.logits
SCREAMING_SNAKE_CASE__ = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=__UpperCAmelCase , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
| 165 | 1 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = IFPipeline
UpperCamelCase = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
UpperCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def a__ ( self : int ) -> int:
"""simple docstring"""
return self._get_dummy_components()
def a__ ( self : int , A_ : Optional[int] , A_ : Optional[Any]=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(A_ ).startswith('mps' ):
lowerCamelCase_ = torch.manual_seed(A_ )
else:
lowerCamelCase_ = torch.Generator(device=A_ ).manual_seed(A_ )
lowerCamelCase_ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def a__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def a__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
self._test_save_load_local()
def a__ ( self : int ) -> str:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def a__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : List[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
lowerCamelCase_ = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=A_ , tokenizer=A_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
lowerCamelCase_ , lowerCamelCase_ = pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
lowerCamelCase_ = None
lowerCamelCase_ = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(A_ , A_ , A_ , A_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
lowerCamelCase_ = IFImgaImgPipeline(**pipe_a.components )
lowerCamelCase_ = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(A_ , A_ , A_ , A_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowerCamelCase_ = IFInpaintingPipeline(**pipe_a.components )
lowerCamelCase_ = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(A_ , A_ , A_ , A_ )
def a__ ( self : List[Any] , A_ : List[Any] , A_ : Optional[Any] , A_ : int , A_ : int ) -> Any:
"""simple docstring"""
_start_torch_memory_measurement()
lowerCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase_ = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , num_inference_steps=2 , generator=A_ , output_type='np' , )
lowerCamelCase_ = output.images[0]
assert image.shape == (64, 64, 3)
lowerCamelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
lowerCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(A_ , A_ )
# pipeline 2
_start_torch_memory_measurement()
lowerCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(A_ )
lowerCamelCase_ = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , generator=A_ , num_inference_steps=2 , output_type='np' , )
lowerCamelCase_ = output.images[0]
assert image.shape == (256, 256, 3)
lowerCamelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(A_ , A_ )
def a__ ( self : str , A_ : Union[str, Any] , A_ : List[str] , A_ : Optional[int] , A_ : List[str] ) -> List[Any]:
"""simple docstring"""
_start_torch_memory_measurement()
lowerCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(A_ )
lowerCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase_ = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , num_inference_steps=2 , generator=A_ , output_type='np' , )
lowerCamelCase_ = output.images[0]
assert image.shape == (64, 64, 3)
lowerCamelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowerCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(A_ , A_ )
# pipeline 2
_start_torch_memory_measurement()
lowerCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(A_ )
lowerCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(A_ )
lowerCamelCase_ = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , original_image=A_ , generator=A_ , num_inference_steps=2 , output_type='np' , )
lowerCamelCase_ = output.images[0]
assert image.shape == (256, 256, 3)
lowerCamelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(A_ , A_ )
def a__ ( self : Optional[int] , A_ : str , A_ : List[Any] , A_ : int , A_ : Any ) -> List[str]:
"""simple docstring"""
_start_torch_memory_measurement()
lowerCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(A_ )
lowerCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(A_ )
lowerCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase_ = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , mask_image=A_ , num_inference_steps=2 , generator=A_ , output_type='np' , )
lowerCamelCase_ = output.images[0]
assert image.shape == (64, 64, 3)
lowerCamelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowerCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(A_ , A_ )
# pipeline 2
_start_torch_memory_measurement()
lowerCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(A_ )
lowerCamelCase_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(A_ )
lowerCamelCase_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(A_ )
lowerCamelCase_ = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , mask_image=A_ , original_image=A_ , generator=A_ , num_inference_steps=2 , output_type='np' , )
lowerCamelCase_ = output.images[0]
assert image.shape == (256, 256, 3)
lowerCamelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(A_ , A_ )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 208 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowerCamelCase : Optional[int] = float("nan")
class A:
'''simple docstring'''
def __init__( self : Optional[Any] , A_ : int ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = sys.stdout
lowerCamelCase_ = open(A_ , 'a' )
def __getattr__( self : List[Any] , A_ : Optional[int] ) -> str:
"""simple docstring"""
return getattr(self.stdout , A_ )
def a__ ( self : int , A_ : int ) -> List[str]:
"""simple docstring"""
self.stdout.write(A_ )
# strip tqdm codes
self.file.write(re.sub(r'^.*\r' , '' , A_ , 0 , re.M ) )
def _SCREAMING_SNAKE_CASE ( lowercase : str=80 , lowercase : Tuple=False ):
'''simple docstring'''
lowerCamelCase_ = []
# deal with critical env vars
lowerCamelCase_ = ['CUDA_VISIBLE_DEVICES']
for key in env_keys:
lowerCamelCase_ = os.environ.get(lowercase , lowercase )
if val is not None:
cmd.append(f"""{key}={val}""" )
# python executable (not always needed if the script is executable)
lowerCamelCase_ = sys.executable if full_python_path else sys.executable.split('/' )[-1]
cmd.append(lowercase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
lowerCamelCase_ = []
lowerCamelCase_ = ''
while len(lowercase ) > 0:
current_line += f"""{cmd.pop(0 )} """
if len(lowercase ) == 0 or len(lowercase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(lowercase )
lowerCamelCase_ = ''
return "\\\n".join(lowercase )
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] , lowercase : Tuple ):
'''simple docstring'''
lowerCamelCase_ = re.sub(r'[\\\n]+' , ' ' , args.base_cmd )
# remove --output_dir if any and set our own
lowerCamelCase_ = re.sub('--output_dir\s+[^\s]+' , '' , args.base_cmd )
args.base_cmd += f""" --output_dir {output_dir}"""
# ensure we have --overwrite_output_dir
lowerCamelCase_ = re.sub('--overwrite_output_dir\s+' , '' , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : int , lowercase : Dict , lowercase : List[str] , lowercase : List[str] , lowercase : List[str] , lowercase : Dict ):
'''simple docstring'''
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
lowerCamelCase_ = subprocess.run(lowercase , capture_output=lowercase , text=lowercase )
if verbose:
print('STDOUT' , result.stdout )
print('STDERR' , result.stderr )
# save the streams
lowerCamelCase_ = variation.replace(' ' , '-' )
with open(Path(lowercase ) / f"""log.{prefix}.stdout.txt""" , 'w' ) as f:
f.write(result.stdout )
with open(Path(lowercase ) / f"""log.{prefix}.stderr.txt""" , 'w' ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print('failed' )
return {target_metric_key: nan}
with io.open(f"""{output_dir}/all_results.json""" , 'r' , encoding='utf-8' ) as f:
lowerCamelCase_ = json.load(lowercase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : Dict , lowercase : Optional[Any] , lowercase : List[Any] , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : List[str] , lowercase : Dict , lowercase : Any , lowercase : int , ):
'''simple docstring'''
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = f"""{id}: {variation:<{longest_variation_len}}"""
lowerCamelCase_ = f"""{preamble}: """
lowerCamelCase_ = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(lowercase ) , desc=lowercase , leave=lowercase ):
lowerCamelCase_ = process_run_single(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
lowerCamelCase_ = single_run_metrics[target_metric_key]
if not math.isnan(lowercase ):
metrics.append(lowercase )
results.append(lowercase )
outcome += "✓"
else:
outcome += "✘"
lowerCamelCase_ = f"""\33[2K\r{outcome}"""
if len(lowercase ) > 0:
lowerCamelCase_ = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
lowerCamelCase_ = round(mean_metrics[target_metric_key] , 2 )
lowerCamelCase_ = f"""{outcome} {mean_target}"""
if len(lowercase ) > 1:
results_str += f""" {tuple(round(lowercase , 2 ) for x in results )}"""
print(lowercase )
lowerCamelCase_ = variation
return mean_metrics
else:
print(lowercase )
return {variation_key: variation, target_metric_key: nan}
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = torch.cuda.get_device_properties(torch.device('cuda' ) )
return f"""
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def _SCREAMING_SNAKE_CASE ( lowercase : Any , lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase_ = pd.DataFrame(lowercase )
lowerCamelCase_ = 'variation'
lowerCamelCase_ = 'diff_%'
lowerCamelCase_ = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
lowerCamelCase_ = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(lowercase ):
# as a fallback, use the minimal value as the sentinel
lowerCamelCase_ = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(lowercase ):
lowerCamelCase_ = df.apply(
lambda lowercase : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis='columns' , )
# re-order columns
lowerCamelCase_ = [variation_key, target_metric_key, diff_key, *report_metric_keys]
lowerCamelCase_ = df.reindex(lowercase , axis='columns' ) # reorder cols
# capitalize
lowerCamelCase_ = df.rename(str.capitalize , axis='columns' )
# make the cols as narrow as possible
lowerCamelCase_ = df.rename(lambda lowercase : c.replace('_' , '<br>' ) , axis='columns' )
lowerCamelCase_ = df.rename(lambda lowercase : c.replace('_' , '\n' ) , axis='columns' )
lowerCamelCase_ = ['', 'Copy between the cut-here-lines and paste as is to github or a forum']
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=lowercase , floatfmt='.2f' )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=lowercase , floatfmt='.2f' )]
print('\n\n'.join(lowercase ) )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--base-cmd' , default=lowercase , type=lowercase , required=lowercase , help='Base cmd' , )
parser.add_argument(
'--variations' , default=lowercase , type=lowercase , nargs='+' , required=lowercase , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , )
parser.add_argument(
'--base-variation' , default=lowercase , type=lowercase , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , )
parser.add_argument(
'--target-metric-key' , default=lowercase , type=lowercase , required=lowercase , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , )
parser.add_argument(
'--report-metric-keys' , default='' , type=lowercase , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples' , )
parser.add_argument(
'--repeat-times' , default=1 , type=lowercase , help='How many times to re-run each variation - an average will be reported' , )
parser.add_argument(
'--output_dir' , default='output_benchmark' , type=lowercase , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , )
parser.add_argument(
'--verbose' , default=lowercase , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , )
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = args.output_dir
Path(lowercase ).mkdir(exist_ok=lowercase )
lowerCamelCase_ = get_base_command(lowercase , lowercase )
# split each dimension into its --foo variations
lowerCamelCase_ = [list(map(str.strip , re.split(r'\|' , lowercase ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
lowerCamelCase_ = list(map(str.strip , map(' '.join , itertools.product(*lowercase ) ) ) )
lowerCamelCase_ = max(len(lowercase ) for x in variations )
# split wanted keys
lowerCamelCase_ = args.report_metric_keys.split()
# capture prints into a log file for convenience
lowerCamelCase_ = f"""benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt"""
print(f"""\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt""" )
print(f"""and this script's output is also piped into {report_fn}""" )
lowerCamelCase_ = Tee(lowercase )
print(f"""\n*** Running {len(lowercase )} benchmarks:""" )
print(f"""Base command: {" ".join(lowercase )}""" )
lowerCamelCase_ = 'variation'
lowerCamelCase_ = []
for id, variation in enumerate(tqdm(lowercase , desc='Total completion: ' , leave=lowercase ) ):
lowerCamelCase_ = base_cmd + variation.split()
results.append(
process_run(
id + 1 , lowercase , lowercase , lowercase , lowercase , args.target_metric_key , lowercase , args.repeat_times , lowercase , args.verbose , ) )
process_results(lowercase , args.target_metric_key , lowercase , args.base_variation , lowercase )
if __name__ == "__main__":
main()
| 208 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 59 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( __a ):
__a : int = ["""image_processor""", """tokenizer"""]
__a : Union[str, Any] = """ChineseCLIPImageProcessor"""
__a : List[Any] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Dict , lowercase : Union[str, Any]=None , lowercase : Dict=None , **lowercase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowercase , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase , lowercase )
UpperCAmelCase = self.image_processor
def __call__( self : Tuple , lowercase : Optional[Any]=None , lowercase : Union[str, Any]=None , lowercase : int=None , **lowercase : Dict ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(lowercase , return_tensors=lowercase , **lowercase )
if images is not None:
UpperCAmelCase = self.image_processor(lowercase , return_tensors=lowercase , **lowercase )
if text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase ) , tensor_type=lowercase )
def A ( self : int , *lowercase : Tuple , **lowercase : List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def A ( self : Optional[Any] , *lowercase : int , **lowercase : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A ( self : List[Any] ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase , )
return self.image_processor_class
| 34 | 0 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _snake_case :
@staticmethod
def SCREAMING_SNAKE_CASE__ ( *_lowerCamelCase , **_lowerCamelCase ):
pass
def __lowerCamelCase ( UpperCAmelCase_ : Tuple ):
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
snake_case : Union[str, Any] = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :Union[str, Any] = pipeline(
'''document-question-answering''' , model=a_ , tokenizer=a_ , image_processor=a_ )
a :Any = INVOICE_URL
a :List[str] = list(zip(*apply_tesseract(load_image(a_ ) , a_ , '''''' ) ) )
a :Any = '''What is the placebo?'''
a :Any = [
{
'''image''': load_image(a_ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ):
a :Optional[Any] = dqa_pipeline(a_ , top_k=2 )
self.assertEqual(
a_ , [
[
{'''score''': ANY(a_ ), '''answer''': ANY(a_ ), '''start''': ANY(a_ ), '''end''': ANY(a_ )},
{'''score''': ANY(a_ ), '''answer''': ANY(a_ ), '''start''': ANY(a_ ), '''end''': ANY(a_ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def SCREAMING_SNAKE_CASE__ ( self ):
a :str = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
a :Union[str, Any] = INVOICE_URL
a :Tuple = '''How many cats are there?'''
a :Tuple = [
{'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
a :Union[str, Any] = dqa_pipeline(image=a_ , question=a_ , top_k=2 )
self.assertEqual(nested_simplify(a_ , decimals=4 ) , a_ )
a :int = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(a_ , decimals=4 ) , a_ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
a :List[Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
a :List[Any] = dqa_pipeline(image=a_ , question=a_ , top_k=2 )
self.assertEqual(a_ , [] )
# We can optionnally pass directly the words and bounding boxes
a :Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
a :Union[str, Any] = []
a :int = []
a :List[str] = dqa_pipeline(image=a_ , question=a_ , words=a_ , boxes=a_ , top_k=2 )
self.assertEqual(a_ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
a :str = INVOICE_URL
a :Any = '''What is the invoice number?'''
a :Union[str, Any] = dqa_pipeline(image=a_ , question=a_ , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
a :Union[str, Any] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
a :Dict = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[int] = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
a :str = INVOICE_URL
a :List[str] = '''What is the invoice number?'''
a :Tuple = dqa_pipeline(image=a_ , question=a_ , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
a :str = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
a :Optional[int] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a_ )
a :int = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a_ , revision='''3dc6de3''' , )
a :int = INVOICE_URL
a :List[str] = '''What is the invoice number?'''
a :Union[str, Any] = dqa_pipeline(image=a_ , question=a_ , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
a :Tuple = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
a :Tuple = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
a :Union[str, Any] = list(zip(*apply_tesseract(load_image(a_ ) , a_ , '''''' ) ) )
# This model should also work if `image` is set to None
a :Dict = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def SCREAMING_SNAKE_CASE__ ( self ):
a :int = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a_ )
a :int = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a_ , revision='''3dc6de3''' , max_seq_len=50 , )
a :int = INVOICE_URL
a :Optional[int] = '''What is the invoice number?'''
a :str = dqa_pipeline(image=a_ , question=a_ , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
a :List[Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
a :Union[str, Any] = list(zip(*apply_tesseract(load_image(a_ ) , a_ , '''''' ) ) )
# This model should also work if `image` is set to None
a :Any = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
a :int = INVOICE_URL
a :Optional[Any] = '''What is the invoice number?'''
a :Optional[int] = dqa_pipeline(image=a_ , question=a_ , top_k=2 )
self.assertEqual(nested_simplify(a_ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
| 352 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
snake_case : Any = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] ):
"""simple docstring"""
inspect_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
a :List[Any] = path + '''.py'''
assert script_name in os.listdir(UpperCAmelCase_ )
assert "__pycache__" not in os.listdir(UpperCAmelCase_ )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any ):
"""simple docstring"""
inspect_metric(UpperCAmelCase_ , UpperCAmelCase_ )
a :Dict = path + '''.py'''
assert script_name in os.listdir(UpperCAmelCase_ )
assert "__pycache__" not in os.listdir(UpperCAmelCase_ )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str ):
"""simple docstring"""
a :List[str] = get_dataset_config_info(UpperCAmelCase_ , config_name=UpperCAmelCase_ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
with pytest.raises(UpperCAmelCase_ ):
get_dataset_config_info(UpperCAmelCase_ , config_name=UpperCAmelCase_ )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def __lowerCamelCase ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int ):
"""simple docstring"""
a :List[str] = get_dataset_config_names(UpperCAmelCase_ )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def __lowerCamelCase ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] ):
"""simple docstring"""
a :Optional[int] = get_dataset_infos(UpperCAmelCase_ )
assert list(infos.keys() ) == expected_configs
a :Union[str, Any] = expected_configs[0]
assert expected_config in infos
a :List[Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] ):
"""simple docstring"""
a :Union[str, Any] = get_dataset_infos(UpperCAmelCase_ )
assert expected_config in infos
a :int = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str ):
"""simple docstring"""
with pytest.raises(UpperCAmelCase_ ):
get_dataset_split_names(UpperCAmelCase_ , config_name=UpperCAmelCase_ )
| 281 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class UpperCamelCase_ :
# setable values
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None # sigma(t_i)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ) -> Optional[Any]:
return cls()
@dataclass
class UpperCamelCase_ (__A ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
class UpperCamelCase_ (__A , __A ):
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
return True
@register_to_config
def __init__( self : List[str] , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : float = 100 , lowerCAmelCase_ : float = 1.0_0_7 , lowerCAmelCase_ : float = 80 , lowerCAmelCase_ : float = 0.0_5 , lowerCAmelCase_ : float = 50 , ) -> Union[str, Any]:
pass
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
return KarrasVeSchedulerState.create()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple = () ) -> KarrasVeSchedulerState:
UpperCAmelCase_ : Dict = jnp.arange(0 , lowerCAmelCase_ )[::-1].copy()
UpperCAmelCase_ : Dict = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=lowerCAmelCase_ , schedule=jnp.array(lowerCAmelCase_ , dtype=jnp.floataa ) , timesteps=lowerCAmelCase_ , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : random.KeyArray , ) -> Tuple[jnp.ndarray, float]:
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase_ : Any = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
UpperCAmelCase_ : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase_ : List[Any] = random.split(lowerCAmelCase_ , num=1 )
UpperCAmelCase_ : List[str] = self.config.s_noise * random.normal(key=lowerCAmelCase_ , shape=sample.shape )
UpperCAmelCase_ : Optional[Any] = sigma + gamma * sigma
UpperCAmelCase_ : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
UpperCAmelCase_ : Union[str, Any] = sample_hat + sigma_hat * model_output
UpperCAmelCase_ : List[Any] = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
UpperCAmelCase_ : str = sample_prev + sigma_prev * model_output
UpperCAmelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase_ : int = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Dict:
raise NotImplementedError()
| 268 |
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def snake_case ( A__ ,A__ ,A__ ):
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
UpperCAmelCase_ : Dict = (low + high) // 2
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = max_subarray(A__ ,A__ ,A__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = max_subarray(A__ ,mid + 1 ,A__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = max_cross_sum(A__ ,A__ ,A__ ,A__ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def snake_case ( A__ ,A__ ,A__ ,A__ ):
UpperCAmelCase_ , UpperCAmelCase_ : str = float("-inf" ), -1
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = float("-inf" ), -1
UpperCAmelCase_ : int | float = 0
for i in range(A__ ,low - 1 ,-1 ):
summ += arr[i]
if summ > left_sum:
UpperCAmelCase_ : str = summ
UpperCAmelCase_ : Any = i
UpperCAmelCase_ : Dict = 0
for i in range(mid + 1 ,high + 1 ):
summ += arr[i]
if summ > right_sum:
UpperCAmelCase_ : List[Any] = summ
UpperCAmelCase_ : Optional[Any] = i
return max_left, max_right, (left_sum + right_sum)
def snake_case ( A__ ):
UpperCAmelCase_ : str = [randint(1 ,A__ ) for _ in range(A__ )]
UpperCAmelCase_ : str = time.time()
max_subarray(A__ ,0 ,input_size - 1 )
UpperCAmelCase_ : int = time.time()
return end - start
def snake_case ( ):
UpperCAmelCase_ : int = [10, 1_00, 10_00, 1_00_00, 5_00_00, 10_00_00, 20_00_00, 30_00_00, 40_00_00, 50_00_00]
UpperCAmelCase_ : List[str] = [time_max_subarray(A__ ) for input_size in input_sizes]
print("No of Inputs\t\tTime Taken" )
for input_size, runtime in zip(A__ ,A__ ):
print(A__ ,"\t\t" ,A__ )
plt.plot(A__ ,A__ )
plt.xlabel("Number of Inputs" )
plt.ylabel("Time taken in seconds" )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 268 | 1 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Dict:
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ) -> str:
'''simple docstring'''
return (-y * np.log(UpperCamelCase__ ) - (1 - y) * np.log(1 - h )).mean()
def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] ) -> int:
'''simple docstring'''
_snake_case = np.dot(UpperCamelCase__ , UpperCamelCase__ )
return np.sum(y * scores - np.log(1 + np.exp(UpperCamelCase__ ) ) )
def lowerCamelCase__ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Any=70_000 ) -> Tuple:
'''simple docstring'''
_snake_case = np.zeros(x.shape[1] )
for iterations in range(UpperCamelCase__ ):
_snake_case = np.dot(UpperCamelCase__ , UpperCamelCase__ )
_snake_case = sigmoid_function(UpperCamelCase__ )
_snake_case = np.dot(x.T , h - y ) / y.size
_snake_case = theta - alpha * gradient # updating the weights
_snake_case = np.dot(UpperCamelCase__ , UpperCamelCase__ )
_snake_case = sigmoid_function(UpperCamelCase__ )
_snake_case = cost_function(UpperCamelCase__ , UpperCamelCase__ )
if iterations % 100 == 0:
print(F'''loss: {j} \t''' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
UpperCAmelCase_ = datasets.load_iris()
UpperCAmelCase_ = iris.data[:, :2]
UpperCAmelCase_ = (iris.target != 0) * 1
UpperCAmelCase_ = 0.1
UpperCAmelCase_ = logistic_reg(alpha, x, y, max_iterations=70000)
print("""theta: """, theta) # printing the theta i.e our weights vector
def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Any:
'''simple docstring'''
return sigmoid_function(
np.dot(UpperCamelCase__ , UpperCamelCase__ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""")
((UpperCAmelCase_) , (UpperCAmelCase_)) = (x[:, 0].min(), x[:, 0].max())
((UpperCAmelCase_) , (UpperCAmelCase_)) = (x[:, 1].min(), x[:, 1].max())
((UpperCAmelCase_) , (UpperCAmelCase_)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
UpperCAmelCase_ = np.c_[xxa.ravel(), xxa.ravel()]
UpperCAmelCase_ = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="""black""")
plt.legend()
plt.show()
| 295 |
from collections.abc import Sequence
def lowerCamelCase__ ( UpperCamelCase__ : Sequence[float] , UpperCamelCase__ : bool = False ) -> float:
'''simple docstring'''
if not arr:
return 0
_snake_case = 0 if allow_empty_subarrays else float('-inf' )
_snake_case = 0.0
for num in arr:
_snake_case = max(0 if allow_empty_subarrays else num , curr_sum + num )
_snake_case = max(UpperCamelCase__ , UpperCamelCase__ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase_ = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
| 295 | 1 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
a_ : Dict = ['a', 'b', 'c']
# Defaults to last layer if both are None
a_ , a_ : List[str] = get_aligned_output_features_output_indices(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , ['c'] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , [2] )
# Out indices set to match out features
a_ , a_ : str = get_aligned_output_features_output_indices(['a', 'c'] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , ['a', 'c'] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , [0, 2] )
# Out features set to match out indices
a_ , a_ : int = get_aligned_output_features_output_indices(SCREAMING_SNAKE_CASE__ , [0, 2] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , ['a', 'c'] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , [0, 2] )
# Out features selected from negative indices
a_ , a_ : Dict = get_aligned_output_features_output_indices(SCREAMING_SNAKE_CASE__ , [-3, -1] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , ['a', 'c'] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , [-3, -1] )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
# Stage names must be set
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , SCREAMING_SNAKE_CASE__ )
# Out features must be a list
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] )
# Out features must be a subset of stage names
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] )
# Out indices must be a list or tuple
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
verify_out_features_out_indices(SCREAMING_SNAKE_CASE__ , 0 , ['a', 'b'] )
# Out indices must be a subset of stage names
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
verify_out_features_out_indices(SCREAMING_SNAKE_CASE__ , (0, 1) , ['a'] )
# Out features and out indices must be the same length
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] )
# Out features should match out indices
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] )
# Out features and out indices should be in order
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] )
# Check passes with valid inputs
verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] )
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
a_ : Tuple = BackboneMixin()
a_ : Optional[int] = ['a', 'b', 'c']
a_ : List[str] = ['a', 'c']
a_ : Optional[Any] = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
a_ : Dict = ['a', 'b']
self.assertEqual(backbone.out_features , ['a', 'b'] )
self.assertEqual(backbone.out_indices , [0, 1] )
a_ : List[str] = [-3, -1]
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 32 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
UpperCAmelCase_ : Dict = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
snake_case__ : List[str] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
snake_case__ : Optional[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
snake_case__ : str = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
snake_case__ : List[Any] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
a_ : List[Any] = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' )
a_ : int = text_classifier('This is great !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] )
a_ : Tuple = text_classifier('This is great !' , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}] )
a_ : List[str] = text_classifier(['This is great !', 'This is bad'] , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
] , )
a_ : Tuple = text_classifier('This is great !' , top_k=1 )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] )
# Legacy behavior
a_ : Union[str, Any] = text_classifier('This is great !' , return_all_scores=SCREAMING_SNAKE_CASE__ )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] )
a_ : List[str] = text_classifier('This is great !' , return_all_scores=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]] )
a_ : int = text_classifier(['This is great !', 'Something else'] , return_all_scores=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
] , )
a_ : str = text_classifier(['This is great !', 'Something else'] , return_all_scores=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [
{'label': 'LABEL_0', 'score': 0.504},
{'label': 'LABEL_0', 'score': 0.504},
] , )
@require_torch
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
import torch
a_ : List[Any] = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , )
a_ : Any = text_classifier('This is great !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] )
@require_tf
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
a_ : List[str] = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' )
a_ : Optional[int] = text_classifier('This is great !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] )
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
a_ : List[str] = pipeline('text-classification' )
a_ : Dict = text_classifier('This is great !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 1.0}] )
a_ : Union[str, Any] = text_classifier('This is bad !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
a_ : Tuple = text_classifier('Birds are a type of animal' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 0.988}] )
@slow
@require_tf
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
a_ : Dict = pipeline('text-classification' , framework='tf' )
a_ : Optional[Any] = text_classifier('This is great !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 1.0}] )
a_ : int = text_classifier('This is bad !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
a_ : Optional[int] = text_classifier('Birds are a type of animal' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 0.988}] )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
a_ : Optional[Any] = TextClassificationPipeline(model=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, Any]:
a_ : List[str] = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
a_ : Union[str, Any] = 'HuggingFace is in'
a_ : int = text_classifier(SCREAMING_SNAKE_CASE__ )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
a_ : Union[str, Any] = ['HuggingFace is in ', 'Paris is in France']
a_ : int = text_classifier(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}, {'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] , )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
a_ : List[Any] = text_classifier(SCREAMING_SNAKE_CASE__ , top_k=SCREAMING_SNAKE_CASE__ )
a_ : Dict = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [[{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] * N, [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] * N] , )
a_ : int = {'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'}
a_ : Optional[int] = text_classifier(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , {'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )} , )
self.assertTrue(outputs['label'] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
a_ : Any = [['HuggingFace is in ', 'Paris is in France']]
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
text_classifier(SCREAMING_SNAKE_CASE__ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
a_ : Tuple = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] , )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
| 32 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = "▁"
_UpperCamelCase = {"vocab_file": "sentencepiece.bpe.model"}
_UpperCamelCase = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
_UpperCamelCase = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class lowercase ( snake_case__ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""]
def __init__(self , __a , __a="<s>" , __a="</s>" , __a="</s>" , __a="<s>" , __a="<unk>" , __a="<pad>" , __a="<mask>" , __a = None , **__a , ) -> int:
"""simple docstring"""
UpperCAmelCase__ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
UpperCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase_ ) )
UpperCAmelCase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCAmelCase__ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase__ = 1
UpperCAmelCase__ = len(self.sp_model ) + self.fairseq_offset
UpperCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.__dict__.copy()
UpperCAmelCase__ = None
UpperCAmelCase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__(self , __a ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCAmelCase__ = {}
UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCamelCase__ (self , __a , __a = None ) -> Optional[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
UpperCAmelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ (self , __a , __a = None , __a = False ) -> Any:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_ )) + [1]
return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1]
def UpperCamelCase__ (self , __a , __a = None ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase__ (self , __a ) -> Optional[Any]:
"""simple docstring"""
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
def UpperCamelCase__ (self , __a ) -> Dict:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase__ = self.sp_model.PieceToId(UpperCAmelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase__ (self , __a ) -> Any:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase__ (self , __a ) -> int:
"""simple docstring"""
UpperCAmelCase__ = "".join(UpperCAmelCase_ ).replace(UpperCAmelCase_ , ' ' ).strip()
return out_string
def UpperCamelCase__ (self , __a , __a = None ) -> Optional[Any]:
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase__ = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , 'wb' ) as fi:
UpperCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
| 365 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=4 , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_attention_mask
UpperCAmelCase__ = use_token_type_ids
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = num_choices
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_attention_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = True
UpperCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = FlaxRobertaModelTester(self )
@slow
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCAmelCase__ = model_class_name.from_pretrained('roberta-base' , from_pt=__a )
UpperCAmelCase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(__a )
| 335 | 0 |
import math
def lowerCAmelCase_ ( _snake_case : float , _snake_case : float ) -> float:
'''simple docstring'''
return math.pow(_snake_case , 2 ) - a
def lowerCAmelCase_ ( _snake_case : float ) -> float:
'''simple docstring'''
return 2 * x
def lowerCAmelCase_ ( _snake_case : float ) -> float:
'''simple docstring'''
__magic_name__ : Optional[int] = 2.0
while start <= a:
__magic_name__ : str = math.pow(_snake_case , 2 )
return start
def lowerCAmelCase_ ( _snake_case : float , _snake_case : int = 9999 , _snake_case : float = 0.00_000_000_000_001 ) -> float:
'''simple docstring'''
if a < 0:
raise ValueError("math domain error" )
__magic_name__ : Optional[int] = get_initial_point(_snake_case )
for _ in range(_snake_case ):
__magic_name__ : int = value
__magic_name__ : str = value - fx(_snake_case , _snake_case ) / fx_derivative(_snake_case )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
| 281 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
snake_case : List[Any] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
snake_case : Any = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
snake_case : str = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
"https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge",
] , )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a=None , _a=True , _a=False ):
if rouge_types is None:
__magic_name__ : str = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
__magic_name__ : List[str] = rouge_scorer.RougeScorer(rouge_types=_a , use_stemmer=_a )
if use_aggregator:
__magic_name__ : Dict = scoring.BootstrapAggregator()
else:
__magic_name__ : str = []
for ref, pred in zip(_a , _a ):
__magic_name__ : Union[str, Any] = scorer.score(_a , _a )
if use_aggregator:
aggregator.add_scores(_a )
else:
scores.append(_a )
if use_aggregator:
__magic_name__ : Any = aggregator.aggregate()
else:
__magic_name__ : List[Any] = {}
for key in scores[0]:
__magic_name__ : str = [score[key] for score in scores]
return result
| 281 | 1 |
'''simple docstring'''
# Imports
import numpy as np
class lowercase_ :
"""simple docstring"""
def __init__( self : List[Any] ,lowercase__ : int=None ,lowercase__ : str=None ,lowercase__ : List[Any]=None ,lowercase__ : Dict=None ,lowercase__ : Tuple=None ):
self.set_matricies(red=lowercase__ ,green=lowercase__ ,blue=lowercase__ ,red_edge=lowercase__ ,nir=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Dict=None ,lowercase__ : List[Any]=None ,lowercase__ : Union[str, Any]=None ,lowercase__ : Union[str, Any]=None ,lowercase__ : Union[str, Any]=None ):
if red is not None:
__lowercase = red
if green is not None:
__lowercase = green
if blue is not None:
__lowercase = blue
if red_edge is not None:
__lowercase = red_edge
if nir is not None:
__lowercase = nir
return True
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int="" ,lowercase__ : Any=None ,lowercase__ : Union[str, Any]=None ,lowercase__ : int=None ,lowercase__ : Optional[int]=None ,lowercase__ : Tuple=None ):
self.set_matricies(red=lowercase__ ,green=lowercase__ ,blue=lowercase__ ,red_edge=lowercase__ ,nir=lowercase__ )
__lowercase = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def SCREAMING_SNAKE_CASE ( self : Tuple ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def SCREAMING_SNAKE_CASE ( self : int ):
return self.nir * (self.red / (self.green**2))
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return (self.nir - self.red) / (self.nir + self.red)
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return (self.nir - self.blue) / (self.nir + self.blue)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return (self.nir - self.green) / (self.nir + self.green)
def SCREAMING_SNAKE_CASE ( self : Tuple ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def SCREAMING_SNAKE_CASE ( self : Dict ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def SCREAMING_SNAKE_CASE ( self : Dict ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str]=0.0_8 ,lowercase__ : Any=1.2_2 ,lowercase__ : Dict=0.0_3 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
return (self.nir / self.green) - 1
def SCREAMING_SNAKE_CASE ( self : Tuple ):
return (self.nir / self.redEdge) - 1
def SCREAMING_SNAKE_CASE ( self : Any ):
return (self.red - self.blue) / self.red
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return self.nir - self.green
def SCREAMING_SNAKE_CASE ( self : Dict ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Dict=0.1_6 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : List[str]=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Dict=None ,lowercase__ : int=None ):
return (self.nir - b) / (a * self.red)
def SCREAMING_SNAKE_CASE ( self : Tuple ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return (self.red + self.green + self.blue) / 3_0.5
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return self.nir / self.red
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
return (self.rvi() - 1) / (self.rvi() + 1)
def SCREAMING_SNAKE_CASE ( self : int ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return self.green / (self.nir + self.red + self.green)
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return self.nir / (self.nir + self.red + self.green)
def SCREAMING_SNAKE_CASE ( self : str ):
return self.red / (self.nir + self.red + self.green)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return (self.green - self.red) / (self.green + self.red)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return (self.red - self.green) / (self.red + self.green)
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
__lowercase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def SCREAMING_SNAKE_CASE ( self : Any ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return self.nir / self.red
def SCREAMING_SNAKE_CASE ( self : int ):
return (self.ndvi() + 0.5) ** (1 / 2)
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 52 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.