| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0–1) |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
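# Illustrative behaviour (added note, not part of the original module):
# instantiating the shim emits a FutureWarning but otherwise behaves exactly
# like SegformerImageProcessor.
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       extractor = SegformerFeatureExtractor()
#   assert any(issubclass(w.category, FutureWarning) for w in caught)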
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its saved state disagrees with the
        # requested lowercasing / accent-stripping / Chinese-character handling.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding the [CLS] and [SEP] special tokens."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create the token type ids (segment ids) for a sequence or a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
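# Illustrative usage (added sketch; the token ids shown are indicative values
# for the published bert-base-uncased checkpoint, not verified here):
#   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   encoding = tokenizer("hello world")
#   encoding["input_ids"]  # [CLS] hello world [SEP], roughly [101, 7592, 2088, 102]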
"""simple docstring"""
import math
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
return math.pow(lowerCamelCase__ , 2 ) - a
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[str]:
return 2 * x
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Union[str, Any]:
lowercase__: str = 2.0
while start <= a:
lowercase__: List[Any] = math.pow(lowerCamelCase__ , 2 )
return start
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase = 9_9_9_9 , __UpperCAmelCase = 0.0_0_0_0_0_0_0_0_0_0_0_0_0_1 ) -> Union[str, Any]:
if a < 0:
raise ValueError('''math domain error''' )
lowercase__: Optional[int] = get_initial_point(lowerCamelCase__ )
for _ in range(lowerCamelCase__ ):
lowercase__: Any = value
lowercase__: int = value - fx(lowerCamelCase__ , lowerCamelCase__ ) / fx_derivative(lowerCamelCase__ )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
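# Illustrative check (added sketch, not in the original module): the iteration
# should agree with math.sqrt to well within the default tolerance.
if __name__ == "__main__":
    assert abs(square_root_iterative(2.0) - math.sqrt(2.0)) < 1e-9
    assert abs(square_root_iterative(81.0) - 9.0) < 1e-9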
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )


def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicitly specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicitly specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)


if __name__ == "__main__":
    main()
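# Illustrative invocation (added sketch; the checkpoint names are examples, not
# prescribed by the script):
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2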
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor


@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Optional[int]:
_snake_case = checkpoints.load_tax_checkpoint(__lowerCamelCase )
_snake_case = flatten_dict(__lowerCamelCase )
return flax_params
def _UpperCAmelCase ( __lowerCamelCase : Dict ) -> Optional[int]:
_snake_case = {}
_snake_case = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
_snake_case = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
_snake_case = '''.'''.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
_snake_case = new_key.replace(__lowerCamelCase , __lowerCamelCase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
_snake_case = new_key.replace(__lowerCamelCase , __lowerCamelCase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
_snake_case = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , __lowerCamelCase )
_snake_case = new_key.replace('''encoder''' , '''encoder.encoder''' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
_snake_case = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , __lowerCamelCase )
_snake_case = flax_dict[key]
_snake_case = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
_snake_case = torch.from_numpy(converted_dict[key].T )
else:
_snake_case = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def _UpperCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Any=False , __lowerCamelCase : Optional[int]=False ) -> int:
_snake_case = get_flax_param(__lowerCamelCase )
if not use_large:
_snake_case = PixaStructVisionConfig()
_snake_case = PixaStructTextConfig()
else:
_snake_case = PixaStructVisionConfig(
hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
_snake_case = PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
_snake_case = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__lowerCamelCase )
_snake_case = PixaStructForConditionalGeneration(__lowerCamelCase )
_snake_case = rename_and_convert_flax_params(__lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
_snake_case = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
_snake_case = PixaStructImageProcessor()
_snake_case = PixaStructProcessor(image_processor=__lowerCamelCase , tokenizer=__lowerCamelCase )
if use_large:
_snake_case = 40_96
_snake_case = True
# mkdir if needed
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
print('''Model saved in {}'''.format(__lowerCamelCase ) )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
UpperCAmelCase__ = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
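# Illustrative invocation (added sketch; the paths are placeholders):
#   python convert_pix2struct_original_pytorch_to_hf.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-converted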
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )

_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.

Character error rate can be computed as:

CER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).

CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This
value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the
better the performance of the ASR system, with a CER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcriptions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
    (float): the character error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
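# Illustrative usage (added sketch, mirroring the doctest in _KWARGS_DESCRIPTION):
#   predictions = ["this is the prediction", "there is an other sample"]
#   references = ["this is the reference", "there is another one"]
#   cer = datasets.load_metric("cer")
#   print(cer.compute(predictions=predictions, references=references))
#   # 0.34146341463414637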
def bead_sort(sequence: list) -> list:
    """Bead sort (gravity sort) for a list of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            # let the heavier "bead column" fall onto the lighter one
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
'''simple docstring'''
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # a smaller value before a larger one means subtraction (e.g. IV = 4)
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
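# Illustrative round-trip (added check, not in the original module):
if __name__ == "__main__":
    assert int_to_roman(3456) == "MMMCDLVI"
    assert roman_to_int("MMMCDLVI") == 3456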
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")


def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")


def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file: "
            "qa - a single line in the following format: question [tab] answer_list; "
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args


def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)


if __name__ == "__main__":
    args = get_args()
    main(args)
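# Illustrative invocation (added sketch; file paths are placeholders):
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path predictions.txt \
#       --eval_mode e2e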
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over one of the RFC 3526 MODP groups."""

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
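# Illustrative exchange (added sketch): both parties derive the same secret.
if __name__ == "__main__":
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    alice_public = alice.generate_public_key()
    bob_public = bob.generate_public_key()
    assert alice.generate_shared_key(bob_public) == bob.generate_shared_key(alice_public)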
from collections.abc import Callable
import numpy as np
def heun(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """
    Solve an ODE dy/dx = ode_func(x, y) with Heun's method (the explicit
    trapezoidal rule): a forward-Euler predictor followed by a trapezoid corrector.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor step (forward Euler)
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # corrector step (trapezoidal rule)
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
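# Illustrative run (added sketch): integrating y' = y from y(0) = 1 to x = 1
# should approximate e ≈ 2.71828 for a small step size.
if __name__ == "__main__":
    ys = heun(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
    print(ys[-1])  # close to 2.71828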
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    # Heron's formula
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(F"Rectangle: {area_rectangle(10, 20) = }")
print(F"Square: {area_square(10) = }")
print(F"Triangle: {area_triangle(10, 10) = }")
print(F"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(F"Parallelogram: {area_parallelogram(10, 20) = }")
print(F"Rhombus: {area_rhombus(10, 20) = }")
print(F"Trapezium: {area_trapezium(10, 20, 30) = }")
print(F"Circle: {area_circle(20) = }")
print(F"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(F"Cube: {surface_area_cube(20) = }")
print(F"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(F"Sphere: {surface_area_sphere(20) = }")
print(F"Hemisphere: {surface_area_hemisphere(20) = }")
print(F"Cone: {surface_area_cone(10, 20) = }")
print(F"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(F"Cylinder: {surface_area_cylinder(10, 20) = }")
print(F"Torus: {surface_area_torus(20, 10) = }")
print(F"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(F"Square: {area_reg_polygon(4, 10) = }")
print(F"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 136
| 0
|
def is_unique(input_str: str) -> bool:
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
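    # Minimal usage sketch for the bitmap-based uniqueness check above.
    print(is_unique("abcdef"))  # True: every character appears once
    print(is_unique("hello"))  # False: 'l' repeats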
| 356
|
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 34
| 0
|
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process) -> list:
    """Highest Response Ratio Next (HRRN) scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the process has not finished; if it is 1, it has.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
| 14
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
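# Illustrative usage (kept as comments because this module is only importable
# as part of the `transformers` package; the model choices are arbitrary):
#
#     from transformers import BertConfig, EncoderDecoderConfig, GPT2Config
#
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), GPT2Config())
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention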
| 14
| 1
|
'''simple docstring'''
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total) -> None:
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no) -> int:
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't include this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed) -> int:
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 270
|
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 270
| 1
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 230
|
from torch import nn
class ClassificationHead(nn.Module):
    def __init__(self, class_size: int, embed_size: int):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
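if __name__ == "__main__":
    # Minimal usage sketch for the head above: map a batch of 768-dim hidden
    # states to 5 class logits. The sizes are arbitrary example values.
    import torch

    head = ClassificationHead(class_size=5, embed_size=768)
    logits = head(torch.randn(2, 768))
    print(logits.shape)  # torch.Size([2, 5])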
| 230
| 1
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (the fourth channel is dropped on conversion to RGB)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
| 357
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
snake_case : List[Any] = self._tf_checkpoint
snake_case : Tuple = """"""
else:
snake_case : Tuple = self._tf_checkpoint
snake_case : Tuple = """"""
convert_transfo_xl_checkpoint_to_pytorch(
A , self._config , self._pytorch_dump_output , A )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5, transfo_xl, xlm, xlnet]"
            )
| 176
| 0
|
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=False)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
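if __name__ == "__main__":
    # Minimal usage sketch, assuming the default checkpoint paths used above
    # exist locally (they are not bundled) and `taming` is installed; the
    # random tensor stands in for a real image batch.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    vqgan = load_vqgan(device)
    dummy_batch = torch.randn(1, 3, 256, 256, device=device)
    reconstruction = reconstruct_with_vqgan(dummy_batch, vqgan)
    print(reconstruction.shape)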
| 113
|
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
        del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
        del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
        del dset
| 113
| 1
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
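# Model recap (standard logistic regression, matching the functions below):
#   hypothesis:  h = sigmoid(X @ theta) = 1 / (1 + exp(-X @ theta))
#   cost:        J(theta) = mean(-y * log(h) - (1 - y) * log(1 - h))
#   gradient:    dJ/dtheta = X.T @ (h - y) / m,  with m = number of samples
# Batch gradient descent repeats  theta <- theta - alpha * gradient.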
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| 365
|
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
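# ``MraModelTester`` below follows the standard transformers test pattern: it builds a small
# random config plus dummy inputs, and each ``create_and_check_*`` helper instantiates one
# task head (masked LM, QA, classification, ...) and asserts the output shapes.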
class MraModelTester:
    def __init__(self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16, num_hidden_layers=5, num_attention_heads=2, intermediate_size=36, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 296
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
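# Standard lazy-import pattern: at type-checking time the symbols below are imported for real so
# static analyzers can resolve them; at runtime the module object is replaced by a ``_LazyModule``
# that only imports ``configuration_mega``/``modeling_mega`` when an attribute is first accessed.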
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 70
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
A__ : List[str] =logging.get_logger(__name__)
A__ : Any ={'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A__ : Any ={
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
A__ : Optional[int] ={
'''junnyu/roformer_chinese_small''': 15_36,
'''junnyu/roformer_chinese_base''': 15_36,
'''junnyu/roformer_chinese_char_small''': 5_12,
'''junnyu/roformer_chinese_char_base''': 5_12,
'''junnyu/roformer_small_discriminator''': 1_28,
'''junnyu/roformer_small_generator''': 1_28,
}
A__ : Optional[int] ={
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
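# RoFormer's fast tokenizer pre-segments Chinese text with a custom Jieba-based pre-tokenizer.
# Because that custom ``PreTokenizer`` cannot be pickled or serialized, the class below swaps in a
# plain ``BertPreTokenizer`` before pickling (``__getstate__``) and before ``save_pretrained``, and
# restores the Jieba pre-tokenizer when state is loaded back (``__setstate__``).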
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        # Make sure we correctly set the custom PreTokenizer
        vocab = self.backend_tokenizer.get_vocab()
        self.backend_tokenizer.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # the custom Jieba pre-tokenizer is not picklable, so temporarily swap in a standard one
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 70
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
),
}
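# GPTSAN-japanese stacks ``num_switch_layers`` sparse mixture-of-experts ("switch") blocks,
# optionally followed by ``num_ext_layers`` extra dense blocks, so ``num_layers`` is derived as
# their sum in ``__init__`` below; the ``router_*`` arguments configure the expert router.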
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, vocab_size=36000, max_position_embeddings=1280, d_model=1024, d_ff=8192, d_ext=4096, d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16, num_experts=16, expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-5, router_bias=False, router_jitter_noise=0.0, router_dtype="float32", router_ignore_padding_tokens=False, output_hidden_states=False, output_attentions=False, initializer_factor=0.002, output_router_logits=False, use_cache=True, separator_token_id=35998, pad_token_id=35995, eos_token_id=35999, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs,
        )
| 366
|
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
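# ViTMAE randomly masks a fraction ``mask_ratio`` of image patches on every forward pass, so the
# encoder sequence length (and hence most shape checks below) depends on the mask ratio, and the
# tests seed numpy/torch explicitly wherever a reproducible mask is required.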
class ViTMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
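    # Worked example with the defaults above: image_size=30, patch_size=2 gives
    # num_patches = (30 // 2) ** 2 = 225, and with mask_ratio=0.6 the visible sequence is
    # ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 tokens (the +1 is the [CLS] token).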
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 50
| 0
|
'''simple docstring'''
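# Project Euler 57: the continued-fraction expansions of sqrt(2) are 3/2, 7/5, 17/12, 41/29, ...
# where each step maps n/d -> (n + 2d)/(n + d). The task is to count, among the first 1000
# expansions, the fractions whose numerator has more digits than the denominator
# (the first such expansion is the eighth, 1393/985).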
def solution(n: int = 1000) -> int:
    """Return how many of the first ``n`` expansions of sqrt(2) have a numerator
    with more digits than the denominator."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator

    return len(result)
if __name__ == "__main__":
print(f'''{solution() = }''')
| 47
|
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/spiece.model')
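# The tests below run both the slow (SentencePiece) and fast (Rust) DeBERTa-v2 tokenizers on the
# same inputs and assert that their token streams and ids agree, driven by the ``spiece.model`` fixture.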
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token='<unk>')
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '[PAD]')
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        sequence = ' \tHeLLo!how \n Are yoU? '
        tokens_target = ['▁hello', '!', 'how', '▁are', '▁you', '?']

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
lowercase__ = DebertaVaTokenizer(UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = ''' \tHeLLo!how \n Are yoU? '''
lowercase__ = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = '''This is a test'''
lowercase__ = [13, 1, 4398, 25, 21, 1289]
lowercase__ = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__ = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__ = DebertaVaTokenizer(UpperCamelCase , keep_accents=UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , keep_accents=UpperCamelCase )
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# fmt: off
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
lowercase__ = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = DebertaVaTokenizer(UpperCamelCase )
lowercase__ = tokenizer.encode('''sequence builders''' )
lowercase__ = tokenizer.encode('''multi-sequence build''' )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase , )
@slow
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 2
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
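
# Editorial note: to run just this integration check from a transformers dev
# checkout, something like the following usually works (the test path is an
# assumption and may differ between versions):
#   python -m pytest tests/models/distilbert -k "inference" -x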
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = np.shape(_SCREAMING_SNAKE_CASE )
if rows != columns:
_UpperCAmelCase = (
'''\'table\' has to be of square shaped array but got a '''
f'{rows}x{columns} array:\n{table}'
)
raise ValueError(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = np.zeros((rows, columns) )
_UpperCAmelCase = np.zeros((rows, columns) )
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) )
if upper[j][j] == 0:
raise ArithmeticError('''No LU decomposition exists''' )
_UpperCAmelCase = (table[i][j] - total) / upper[j][j]
_UpperCAmelCase = 1
for j in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
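    # Sanity check (editorial addition): the factors of a small non-singular
    # matrix should multiply back to the input up to floating-point error.
    matrix = np.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]], dtype=float)
    lower_mat, upper_mat = lower_upper_decomposition(matrix)
    assert np.allclose(lower_mat @ upper_mat, matrix)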
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.0_1),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
def euclidean_distance_sqr(point1, point2):
    """Squared Euclidean distance between two 2D points."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force O(n^2) scan over all pairs; used for small inputs."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Closest pair inside the vertical strip; each point is compared with at
    most six neighbours, which keeps this pass linear."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide-and-conquer closest pair, O(n log n)."""
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # collect the points within closest_pair_dis of the dividing line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample
    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = jnp.log(variance)
            max_log = jnp.log(state.common.betas[t])
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"
                " `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
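
# Minimal usage sketch (editorial addition, not part of the diffusers source;
# assumes a working JAX environment and the names defined above):
#
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50, shape=sample.shape)
#   for t in state.timesteps:
#       sample, state = scheduler.step(state, model_output, t, sample, key=rng, return_dict=False)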
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish DNI: eight digits followed by a control letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
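    # Illustrative check (editorial addition): 12345678 % 23 == 14 and
    # LOOKUP_LETTERS[14] == "Z", so "12345678Z" is a valid DNI.
    print(is_spain_national_id("12345678Z"))  # True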
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        cursor_info = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        cursor_info = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the terminal cursor and restores it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
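
if __name__ == "__main__":
    # Illustrative usage (editorial addition): hide the cursor while a short
    # task runs; hide() restores it even if the body raises.
    import time

    with hide():
        time.sleep(2)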
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in the set of unique hashes and remove it if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by:
    1. looking for keywords in the first few lines of the file;
    2. counting occurrences of the words 'config' and 'test' relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics. Config, test and low-quality files are removed with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """
    Return True if `string` can be segmented into a sequence of one or more
    `words` (trie lookup plus memoised dynamic programming).
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
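    # Illustrative checks (editorial addition):
    print(word_break("applepenapple", ["apple", "pen"]))  # True
    print(word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]))  # False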
from math import ceil, sqrt


def solution(limit: int = 1000000) -> int:
    """
    Project Euler 173: count the hollow square laminae that can be formed
    with up to `limit` tiles.
    """
    answer = 0

    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
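    # Statement check (editorial addition): with at most one hundred tiles,
    # forty-one different square laminae can be formed.
    assert solution(100) == 41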
import re
import string
import numpy as np
import datasets
__UpperCamelCase : Union[str, Any] = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
__UpperCamelCase : Union[str, Any] = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
__UpperCamelCase : Any = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """Compute the circular convolution of two finite signals via the
    circulant-matrix method."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix is the second signal rotated right by i
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
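    # Illustrative run (editorial addition) with the signals set in __init__:
    # the circular convolution of [2, 1, 2, -1] with [1, 2, 3, 4].
    print(CircularConvolution().circular_convolution())  # [10, 10, 6, 14]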
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to DetaImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
SCREAMING_SNAKE_CASE__ = DetaImageProcessor()
SCREAMING_SNAKE_CASE__ = image_processing(images=_lowercase , annotations=_lowercase , return_tensors="""pt""" )
# verify pixel values
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , _lowercase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _lowercase , atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _lowercase ) )
# verify boxes
SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _lowercase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _lowercase , atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _lowercase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _lowercase ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _lowercase ) )
# verify orig_size
SCREAMING_SNAKE_CASE__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _lowercase ) )
# verify size
SCREAMING_SNAKE_CASE__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _lowercase ) )
@slow
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
SCREAMING_SNAKE_CASE__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
SCREAMING_SNAKE_CASE__ = DetaImageProcessor(format="""coco_panoptic""" )
SCREAMING_SNAKE_CASE__ = image_processing(images=_lowercase , annotations=_lowercase , masks_path=_lowercase , return_tensors="""pt""" )
# verify pixel values
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , _lowercase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _lowercase , atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _lowercase ) )
# verify boxes
SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _lowercase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _lowercase , atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _lowercase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _lowercase ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _lowercase ) )
# verify masks
SCREAMING_SNAKE_CASE__ = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , _lowercase )
# verify orig_size
SCREAMING_SNAKE_CASE__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _lowercase ) )
# verify size
SCREAMING_SNAKE_CASE__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _lowercase ) )
| 204
|
from math import isqrt
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> list[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
            for j in range(i**2 , __UpperCamelCase , i ):  # step by i so every multiple of i is marked composite
SCREAMING_SNAKE_CASE__ = False
return [i for i in range(2 , __UpperCamelCase ) if is_prime[i]]
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10**8 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = calculate_prime_numbers(max_number // 2 )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = len(__UpperCamelCase ) - 1
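    # Two-pointer scan over the sorted primes: for the prime at index `left`,
    # shrink `right` until the product drops below max_number; every prime at
    # indices left..right then pairs with it to form a distinct semiprime, and
    # requiring left <= right counts each unordered pair exactly once.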
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
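# Sanity check (illustrative): for max_number = 30 the qualifying composites are
# 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26, so the count is 10.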
if __name__ == "__main__":
print(F"""{solution() = }""")
| 204
| 1
|
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class _A :
"""simple docstring"""
def __init__( self : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : int = 13 , __UpperCAmelCase : int = 64 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 3 , __UpperCAmelCase : int = 3 , __UpperCAmelCase : bool = True , __UpperCAmelCase : bool = True , __UpperCAmelCase : int = 128 , __UpperCAmelCase : Dict=[16, 32, 64, 128] , __UpperCAmelCase : int = 7 , __UpperCAmelCase : int = 4 , __UpperCAmelCase : int = 37 , __UpperCAmelCase : str = "gelu" , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : int = 10 , __UpperCAmelCase : float = 0.02 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 128 , __UpperCAmelCase : List[int] = [2, 2, 2, 2] , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 2 , ):
a : int = parent
a : List[str] = batch_size
a : List[Any] = image_size
a : Union[str, Any] = patch_size
a : Any = num_channels
a : Union[str, Any] = is_training
a : Union[str, Any] = use_labels
a : Tuple = hidden_size
a : Tuple = num_hidden_layers
a : int = num_attention_heads
a : Union[str, Any] = intermediate_size
a : int = hidden_act
a : Tuple = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : List[str] = type_sequence_label_size
a : str = initializer_range
a : Optional[int] = encoder_stride
a : Optional[int] = num_attention_outputs
a : Any = embed_dim
a : Dict = embed_dim + 1
a : Union[str, Any] = resolution
a : Optional[int] = depths
a : Union[str, Any] = hidden_sizes
a : Dict = dim
a : List[str] = mlp_expansion_ratio
def __snake_case ( self : Dict):
a : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a : str = None
if self.use_labels:
a : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : List[str] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self : Optional[int]):
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def __snake_case ( self : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Dict):
a : Optional[Any] = TFEfficientFormerModel(config=__UpperCAmelCase)
a : int = model(__UpperCAmelCase , training=__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __snake_case ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple):
a : Optional[int] = self.type_sequence_label_size
a : Union[str, Any] = TFEfficientFormerForImageClassification(__UpperCAmelCase)
a : Dict = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a : Union[str, Any] = 1
a : Any = TFEfficientFormerForImageClassification(__UpperCAmelCase)
a : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : List[str] = model(__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def __snake_case ( self : Tuple):
a : Union[str, Any] = self.prepare_config_and_inputs()
a , a , a : List[str] = config_and_inputs
a : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _A ( _a ,_a ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : Dict = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase : Optional[Any] = (
{
"""feature-extraction""": TFEfficientFormerModel,
"""image-classification""": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
UpperCAmelCase : Tuple = False
UpperCAmelCase : Optional[Any] = False
UpperCAmelCase : List[Any] = False
UpperCAmelCase : Dict = False
UpperCAmelCase : Union[str, Any] = False
def __snake_case ( self : List[Any]):
a : Any = TFEfficientFormerModelTester(self)
a : Optional[Any] = ConfigTester(
self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37)
def __snake_case ( self : str):
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds")
def __snake_case ( self : int):
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings")
def __snake_case ( self : Optional[Any]):
pass
def __snake_case ( self : Optional[Any]):
a , a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Dict = model_class(__UpperCAmelCase)
a : Optional[Any] = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : List[str] = [*signature.parameters.keys()]
a : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase)
def __snake_case ( self : str):
def check_hidden_states_output(__UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any]):
a : int = model_class(__UpperCAmelCase)
a : Optional[int] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase) , training=__UpperCAmelCase)
a : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a : Optional[Any] = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
if hasattr(self.model_tester , "encoder_seq_length"):
a : Optional[Any] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length") and self.model_tester.chunk_length > 1:
a : Any = seq_length * self.model_tester.chunk_length
else:
a : str = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
a : Union[str, Any] = outputs.decoder_hidden_states
                self.assertIsInstance(__UpperCAmelCase , (list, tuple))
self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
a : int = getattr(self.model_tester , "seq_length" , __UpperCAmelCase)
a : Any = getattr(self.model_tester , "decoder_seq_length" , __UpperCAmelCase)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
a , a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : int = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : Optional[Any] = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any]=False):
a : str = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase)
if return_labels:
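            # the distillation "WithTeacher" head is inference-only and computes
            # no loss, so `labels` is not an argument it accepts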
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __snake_case ( self : str):
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
def __snake_case ( self : Union[str, Any]):
a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase)
def __snake_case ( self : str):
a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase)
@slow
def __snake_case ( self : int):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Optional[int] = TFEfficientFormerModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
def __snake_case ( self : Union[str, Any]):
a , a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
a : Dict = True
a : Dict = getattr(self.model_tester , "seq_length" , __UpperCAmelCase)
a : List[str] = getattr(self.model_tester , "encoder_seq_length" , __UpperCAmelCase)
a : List[Any] = getattr(self.model_tester , "key_length" , __UpperCAmelCase)
a : List[str] = getattr(self.model_tester , "chunk_length" , __UpperCAmelCase)
if chunk_length is not None and hasattr(self.model_tester , "num_hashes"):
a : Tuple = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
a : Optional[int] = True
a : Dict = False
a : Union[str, Any] = True
a : Any = model_class(__UpperCAmelCase)
a : Union[str, Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase) , training=__UpperCAmelCase)
a : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__UpperCAmelCase) , self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a : str = True
a : Dict = model_class(__UpperCAmelCase)
a : int = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase) , training=__UpperCAmelCase)
a : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__UpperCAmelCase) , self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def __snake_case ( self : Optional[int]):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
a , a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
a : Any = model_class(__UpperCAmelCase)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
a : str = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__UpperCAmelCase)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
a : Optional[int] = model(__UpperCAmelCase)
self.assertTrue(outputs_dict is not None)
def lowercase ( )-> Dict:
'''simple docstring'''
a : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class _A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __snake_case ( self : Tuple):
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
if is_vision_available()
else None
)
@slow
def __snake_case ( self : Optional[int]):
a : Any = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
a : Optional[int] = self.default_image_processor
a : int = prepare_img()
a : Optional[Any] = image_processor(images=__UpperCAmelCase , return_tensors="tf")
# forward pass
a : Tuple = model(**__UpperCAmelCase , training=__UpperCAmelCase)
# verify the logits
a : str = tf.TensorShape((1, 1000))
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852])
self.assertTrue(np.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4))
@slow
def __snake_case ( self : Optional[Any]):
a : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300")
a : int = self.default_image_processor
a : int = prepare_img()
a : str = image_processor(images=__UpperCAmelCase , return_tensors="tf")
# forward pass
a : Union[str, Any] = model(**__UpperCAmelCase , training=__UpperCAmelCase)
# verify the logits
a : str = tf.TensorShape((1, 1000))
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a : Any = tf.constant([-0.1_312, 0.4_353, -1.0_499])
self.assertTrue(np.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4))
| 40
|
import os
def _lowercase ( ) -> List[str]:
'''simple docstring'''
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
SCREAMING_SNAKE_CASE__ = str(file.readlines()[0] )
SCREAMING_SNAKE_CASE__ = names.replace('"' , '' ).split(',' )
names.sort()
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
total_score += (i + 1) * name_score
SCREAMING_SNAKE_CASE__ = 0
return total_score
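# Worked example from the problem statement: "COLIN" has an alphabetical value of
# 3 + 15 + 12 + 9 + 14 = 53 and sits at position 938 in the sorted list, so it
# contributes 938 * 53 = 49714 to the total.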
if __name__ == "__main__":
print(solution())
| 176
| 0
|
'''simple docstring'''
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
lowerCamelCase_ = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def __lowercase ( __lowercase ) -> Tuple:
'''simple docstring'''
_A = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
lowerCamelCase_ = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def __lowercase ( __lowercase ) -> Union[str, Any]:
'''simple docstring'''
_A = list(s_dict.keys() )
for key in keys:
_A = key
for k, v in WHISPER_MAPPING.items():
if k in key:
_A = new_key.replace(__lowercase , __lowercase )
print(F'''{key} -> {new_key}''' )
_A = s_dict.pop(__lowercase )
return s_dict
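# Illustrative example (actual keys depend on the checkpoint): the mapping above
# rewrites an original OpenAI key such as "decoder.blocks.0.mlp.0.weight" to
# "decoder.layers.0.fc1.weight".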
def __lowercase ( __lowercase ) -> Union[str, Any]:
'''simple docstring'''
_A , _A = emb.weight.shape
_A = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
_A = emb.weight.data
return lin_layer
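# The returned linear layer reuses the embedding's weight tensor, so the LM head
# built from it stays tied to the input embeddings (used below when `tie_embeds`
# holds).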
def __lowercase ( __lowercase , __lowercase ) -> bytes:
'''simple docstring'''
os.makedirs(__lowercase , exist_ok=__lowercase )
_A = os.path.basename(__lowercase )
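    # each checkpoint URL in _MODELS embeds the expected SHA256 digest as its
    # second-to-last path segment, which is what the download is verified against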
_A = url.split("/" )[-2]
_A = os.path.join(__lowercase , __lowercase )
if os.path.exists(__lowercase ) and not os.path.isfile(__lowercase ):
raise RuntimeError(F'''{download_target} exists and is not a regular file''' )
if os.path.isfile(__lowercase ):
_A = open(__lowercase , "rb" ).read()
        if hashlib.sha256(__lowercase ).hexdigest() == expected_sha256:
return model_bytes
else:
warnings.warn(F'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
with urllib.request.urlopen(__lowercase ) as source, open(__lowercase , "wb" ) as output:
with tqdm(
total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=__lowercase , unit_divisor=1024 ) as loop:
while True:
_A = source.read(8192 )
if not buffer:
break
output.write(__lowercase )
loop.update(len(__lowercase ) )
_A = open(__lowercase , "rb" ).read()
    if hashlib.sha256(__lowercase ).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model." )
return model_bytes
def __lowercase ( __lowercase , __lowercase ) -> Optional[Any]:
'''simple docstring'''
if ".pt" not in checkpoint_path:
_A = _download(_MODELS[checkpoint_path] )
else:
_A = torch.load(__lowercase , map_location="cpu" )
_A = original_checkpoint["dims"]
_A = original_checkpoint["model_state_dict"]
_A = state_dict["decoder.token_embedding.weight"]
remove_ignore_keys_(__lowercase )
rename_keys(__lowercase )
_A = True
_A = state_dict["decoder.layers.0.fc1.weight"].shape[0]
_A = WhisperConfig(
        vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=__lowercase , decoder_ffn_dim=__lowercase , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_head"] , max_source_positions=dimensions["n_audio_ctx"] , )
_A = WhisperForConditionalGeneration(__lowercase )
_A , _A = model.model.load_state_dict(__lowercase , strict=__lowercase )
if len(__lowercase ) > 0 and not set(__lowercase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F''' but all the following weights are missing {missing}''' )
if tie_embeds:
_A = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
_A = proj_out_weights
model.save_pretrained(__lowercase )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoint''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCamelCase_ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 174
|
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def __lowercase ( __lowercase ) -> str:
'''simple docstring'''
return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
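# e.g. ["--num_proc", "8", "--all_configs", "True"] -> {"num_proc": "8", "all_configs": "True"}
# (illustrative flags; arguments are taken as flag/value pairs and values stay strings)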
def __lowercase ( ) -> Tuple:
'''simple docstring'''
_A = ArgumentParser(
"HuggingFace Datasets CLI tool" , usage="datasets-cli <command> [<args>]" , allow_abbrev=__lowercase )
_A = parser.add_subparsers(help="datasets-cli command helpers" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(__lowercase )
EnvironmentCommand.register_subcommand(__lowercase )
TestCommand.register_subcommand(__lowercase )
RunBeamCommand.register_subcommand(__lowercase )
DummyDataCommand.register_subcommand(__lowercase )
# Parse args
_A , _A = parser.parse_known_args()
if not hasattr(__lowercase , "func" ):
parser.print_help()
exit(1 )
_A = parse_unknown_args(__lowercase )
# Run
_A = args.func(__lowercase , **__lowercase )
service.run()
if __name__ == "__main__":
main()
| 174
| 1
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : Optional[int] = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def A (__A : str , __A : Optional[int] , __A : Optional[Any] , __A : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
UpperCAmelCase_ = TOKENIZER_CLASSES
else:
UpperCAmelCase_ = {tokenizer_name: getattr(__A , tokenizer_name + '''Fast''' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
UpperCAmelCase_ = TOKENIZER_CLASSES[tokenizer_name]
UpperCAmelCase_ = True
if checkpoint_name is None:
UpperCAmelCase_ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
UpperCAmelCase_ = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
UpperCAmelCase_ = tokenizer_class.from_pretrained(__A , force_download=__A )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
UpperCAmelCase_ , UpperCAmelCase_ = checkpoint.split('''/''' )
UpperCAmelCase_ = os.path.join(__A , __A )
elif add_prefix:
UpperCAmelCase_ = checkpoint
UpperCAmelCase_ = dump_path
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
UpperCAmelCase_ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
UpperCAmelCase_ = file_path.split(__A )[-1][0]
if next_char == "/":
UpperCAmelCase_ = os.path.join(__A , __A )
UpperCAmelCase_ = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
UpperCAmelCase_ = tokenizer.save_pretrained(
__A , legacy_format=__A , filename_prefix=__A )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('''tokenizer.json''' ):
os.remove(__A )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
snake_case_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
snake_case_ : Union[str, Any] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 51
|
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
        # Model for Image Classification mapping
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
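# Each _LazyAutoMapping pairs the config names from CONFIG_MAPPING_NAMES with the
# model class names above and only imports the concrete model class on first lookup.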
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : List[str] = FLAX_MODEL_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModel)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Dict = FLAX_MODEL_FOR_PRETRAINING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Optional[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Any = FLAX_MODEL_FOR_MASKED_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : int = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : List[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Tuple = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : List[str] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="""image classification"""
)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Dict = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Optional[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
| 296
| 0
|
import requests
from bs4 import BeautifulSoup
def UpperCamelCase ( _a = "https://www.worldometers.info/coronavirus" ) -> dict:
'''simple docstring'''
lowercase_ :List[str] = BeautifulSoup(requests.get(lowercase_ ).text , '''html.parser''' )
lowercase_ :Optional[int] = soup.findAll('''h1''' )
lowercase_ :List[str] = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
return {key.text.strip(): value.text.strip() for key, value in zip(lowercase_ , lowercase_ )}
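# The page pairs each headline label (<h1>) with a "maincounter-number" div and
# each "panel-title" span with a "number-table-main" div, so zipping `keys` with
# `values` lines every label up with its figure. This relies on worldometers
# keeping that markup; a layout change breaks the scraper.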
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f"{key}\n{value}\n")
| 362
|
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
SCREAMING_SNAKE_CASE : Any = random.Random()
def UpperCamelCase ( _a , _a=1.0 , _a=None , _a=None ) -> str:
'''simple docstring'''
if rng is None:
lowercase_ :Optional[Any] = global_rng
lowercase_ :List[str] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
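# e.g. floats_list((2, 3)) produces a 2x3 nested list of floats drawn from [0, scale)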
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=400 , UpperCamelCase_=2000 , UpperCamelCase_=1 , UpperCamelCase_=0.0 , UpperCamelCase_=1_6000 , UpperCamelCase_=True , UpperCamelCase_=True , ):
lowercase_ :Any = parent
lowercase_ :Any = batch_size
lowercase_ :int = min_seq_length
lowercase_ :Optional[int] = max_seq_length
lowercase_ :Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowercase_ :Any = feature_size
lowercase_ :str = padding_value
lowercase_ :Optional[int] = sampling_rate
lowercase_ :int = return_attention_mask
lowercase_ :Optional[Any] = do_normalize
def UpperCamelCase ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase ( self , UpperCamelCase_=False , UpperCamelCase_=False ):
def _flatten(UpperCamelCase_ ):
return list(itertools.chain(*UpperCamelCase_ ) )
if equal_length:
lowercase_ :Tuple = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowercase_ :List[Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowercase_ :int = [np.asarray(UpperCamelCase_ ) for x in speech_inputs]
return speech_inputs
class UpperCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
lowercase : int =WavaVecaFeatureExtractor
def UpperCamelCase ( self ):
lowercase_ :Tuple = WavaVecaFeatureExtractionTester(self )
def UpperCamelCase ( self , UpperCamelCase_ ):
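        # asserts the normalized values are approximately zero-mean and
        # unit-variance, within a 1e-3 tolerance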
self.assertTrue(np.all(np.mean(UpperCamelCase_ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCamelCase_ , axis=0 ) - 1 ) < 1E-3 ) )
def UpperCamelCase ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
lowercase_ :int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowercase_ :Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase_ :Any = [np.asarray(UpperCamelCase_ ) for speech_input in speech_inputs]
# Test not batched input
lowercase_ :Any = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
lowercase_ :List[str] = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
# Test batched
lowercase_ :Optional[Any] = feat_extract(UpperCamelCase_ , return_tensors='''np''' ).input_values
lowercase_ :Tuple = feat_extract(UpperCamelCase_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowercase_ :Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowercase_ :int = np.asarray(UpperCamelCase_ )
lowercase_ :List[str] = feat_extract(UpperCamelCase_ , return_tensors='''np''' ).input_values
lowercase_ :Optional[Any] = feat_extract(UpperCamelCase_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
def UpperCamelCase ( self ):
lowercase_ :Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase_ :str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase_ :Any = ['''longest''', '''max_length''', '''do_not_pad''']
lowercase_ :int = [None, 1600, None]
for max_length, padding in zip(UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ :Optional[Any] = feat_extract(UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors='''np''' )
lowercase_ :Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[1][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase ( self ):
lowercase_ :List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase_ :Union[str, Any] = range(800 , 1400 , 200 )
lowercase_ :Optional[int] = [floats_list((1, x) )[0] for x in lengths]
lowercase_ :Any = ['''longest''', '''max_length''', '''do_not_pad''']
lowercase_ :Optional[Any] = [None, 1600, None]
for max_length, padding in zip(UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ :Any = feat_extract(UpperCamelCase_ , max_length=UpperCamelCase_ , padding=UpperCamelCase_ )
lowercase_ :Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase ( self ):
lowercase_ :Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase_ :Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase_ :str = feat_extract(
UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=1000 , padding='''max_length''' , return_tensors='''np''' )
lowercase_ :Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase_ :List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase_ :Optional[int] = feat_extract(
UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=1000 , padding='''longest''' , return_tensors='''np''' )
lowercase_ :Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
lowercase_ :Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase_ :Optional[int] = feat_extract(
UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=2000 , padding='''longest''' , return_tensors='''np''' )
lowercase_ :int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def UpperCamelCase ( self ):
import torch
lowercase_ :Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase_ :Any = np.random.rand(100 ).astype(np.float64 )
lowercase_ :Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowercase_ :List[Any] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
lowercase_ :Dict = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
@slow
@require_torch
def UpperCamelCase ( self ):
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
lowercase_ :List[Any] = WavaVecaConfig.from_pretrained(UpperCamelCase_ )
lowercase_ :Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase_ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
| 252
| 0
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
lowerCamelCase__ : Union[str, Any] = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def UpperCAmelCase_ ( __UpperCAmelCase : Dict=True ) -> Optional[Any]:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
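# absl's parameterized.named_parameters uses the "testcase_name" value (e.g.
# "wikipedia/20220301.de") to label each generated test case.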
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 225
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase__ : Any = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = lowerCamelCase__  # the expected encoding defined in the literal above

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
| 50
| 0
|
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the requested artifacts and return their decoded file contents."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
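# A minimal usage sketch (hypothetical values: the token must be a real GitHub
# token and the artifact names must exist on the last scheduled run):
#
#     import os
#
#     reports = get_last_daily_ci_reports(
#         artifact_names=["ci_results"], output_dir="/tmp/ci", token=os.environ["GH_TOKEN"]
#     )
#     for name, files in reports.items():
#         print(name, sorted(files))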
| 134
|
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein):
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37 = []
    restype_atom37_to_atom14 = []
    restype_atom14_mask = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )

        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14)
    restype_atom37_to_atom14.append([0] * 37)
    restype_atom14_mask.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch):
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
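# A small runnable sketch of the expected shapes (it assumes only that the
# input dict carries an "aatype" tensor, which is all make_atom14_masks reads):
if __name__ == "__main__":
    dummy_aatype = torch.zeros((8,), dtype=torch.long)  # 8 residues, all restype 0
    dummy_protein = make_atom14_masks({"aatype": dummy_aatype})
    print(dummy_protein["residx_atom14_to_atom37"].shape)  # torch.Size([8, 14])
    print(dummy_protein["atom37_atom_exists"].shape)  # torch.Size([8, 37])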
| 134
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> int:
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[Any] = 1
while repunit:
_lowerCAmelCase : List[Any] = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
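# Worked example: for divisor = 7 the remainders of the growing repunit mod 7
# run 1, 4, 6, 5, 2, 0, so least_divisible_repunit(7) == 6; indeed
# R(6) = 111111 = 7 * 15873.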
def solution(limit: int = 1000000) -> int:
    """Return the least odd divisor whose repunit index first exceeds ``limit``."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 44
|
class EditDistance:
    """
    Computes the minimum number of single-character insertions, deletions and
    substitutions needed to turn one string into another, both top-down
    (memoized recursion) and bottom-up (iterative DP table).
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]
    def min_dist_top_down(self, word1: str, word2: str) -> int:
        """Compute the edit distance with memoized top-down recursion."""
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)
    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        """Compute the edit distance by filling the DP table iteratively."""
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
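# Quick worked example (a standard textbook case): "kitten" -> "sitting" needs
# 3 edits (substitute k->s, substitute e->i, insert g), so
# EditDistance().min_dist_bottom_up("kitten", "sitting") == 3.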
if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 110
| 0
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_multiple_files(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 194
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # This __init__ exists for legacy support of the deprecated ``no_*`` args.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 194
| 1
|
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] ,reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] ,)
    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 16
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase__ = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
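# How this pattern behaves in practice (a sketch, not executed here): at
# `import transformers` time only the `_import_structure` dict is built; the
# submodule is imported the first time an attribute such as
# Wav2Vec2ProcessorWithLM is actually accessed, so its optional dependencies
# (e.g. pyctcdecode) are not paid for up front.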
| 290
| 0
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 361
|
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 222
| 0
|
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = BeautifulSoup(requests.get(lowerCamelCase , params=lowerCamelCase ).content , 'html.parser' )
UpperCAmelCase__ = soup.find('div' , attrs={'class': 'gs_ri'} )
UpperCAmelCase__ = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
return anchors[2].get_text()
if __name__ == "__main__":
    params = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
        'year': 2018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 98
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
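# For example, check_same_shape([torch.zeros(1, 3), torch.ones(1, 3)]) is True,
# while adding a (2, 3) tensor to the list makes it False; the Karras-scheduler
# test below relies on this to assert every scheduler returns the same shape.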
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        low_res_img = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=low_res_img,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
| 18
| 0
|
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the electrical impedance formula |Z| = sqrt(R**2 + X**2): given any
    two of resistance, reactance and impedance (pass the unknown one as 0),
    calculate the third.

    >>> electrical_impedance(3, 4, 0)
    {'impedance': 5.0}
    >>> electrical_impedance(0, 4, 5)
    {'resistance': 3.0}
    >>> electrical_impedance(3, 0, 5)
    {'reactance': 4.0}
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError('Exactly one argument must be 0')
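# Worked example (the 3-4-5 Pythagorean triple): a circuit with R = 3 ohm and
# X = 4 ohm has |Z| = sqrt(3**2 + 4**2) = 5 ohm, so electrical_impedance(3, 4, 0)
# returns {'impedance': 5.0}.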
if __name__ == "__main__":
import doctest
doctest.testmod()
| 225
|
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Folder paths and flip mode (left empty here; fill in before running)
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print('Processing...')
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = f'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
        cv2.imwrite(f'/{file_root}.jpg', image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f'Success {index+1}/{len(new_images)} with {file_name}')
        annos_list = []
        for anno in new_annos[index]:
            obj = f'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
            annos_list.append(obj)
        with open(f'/{file_root}.txt', 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f'{label_name}.jpg')

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 225
| 1
|
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 196
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    '''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
    '''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_canine'''] = [
        '''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''CanineForMultipleChoice''',
        '''CanineForQuestionAnswering''',
        '''CanineForSequenceClassification''',
        '''CanineForTokenClassification''',
        '''CanineLayer''',
        '''CanineModel''',
        '''CaninePreTrainedModel''',
        '''load_tf_weights_in_canine''',
    ]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
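# A note on the guard pattern above (generic to these __init__ files): when
# torch is unavailable, the except branch simply leaves the modeling entries
# out of `_import_structure`, so the lazy module never exposes them, while the
# TYPE_CHECKING branch exists only for static type checkers and costs nothing
# at runtime.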
| 196
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 371
|
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case = 0
def lowerCAmelCase ( self : str )-> Any:
snake_case = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[str] )-> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Tuple )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = CLIPConfig()
            # Create a dummy config file with image_processor_type
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case = AutoImageProcessor.from_pretrained(__snake_case ).to_dict()
config_dict.pop("""image_processor_type""" )
snake_case = CLIPImageProcessor(**__snake_case )
# save in new folder
model_config.save_pretrained(__snake_case )
config.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
# make sure private variable is not incorrectly saved
snake_case = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> Dict:
with self.assertRaisesRegex(
__snake_case , """clip-base is not a local folder and is not a valid model identifier""" ):
snake_case = AutoImageProcessor.from_pretrained("""clip-base""" )
def lowerCAmelCase ( self : Tuple )-> int:
with self.assertRaisesRegex(
__snake_case , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
snake_case = AutoImageProcessor.from_pretrained(__snake_case , revision="""aaaaaa""" )
def lowerCAmelCase ( self : str )-> Union[str, Any]:
with self.assertRaisesRegex(
__snake_case , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase ( self : List[str] )-> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case , trust_remote_code=__snake_case )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def lowerCAmelCase ( self : List[str] )-> Dict:
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoImageProcessor.register(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Dict )-> Optional[int]:
        class NewImageProcessor(CLIPImageProcessor):
            """simple docstring"""

            is_local = True
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(__snake_case , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 3
| 0
|
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class snake_case ( unittest.TestCase ):
'''simple docstring'''
    def test_accelerated_optimizer_pickling(self) -> None:
        """simple docstring"""
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''')
AcceleratorState._reset_state()
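# Context sketch (illustrative, not from the original test): the pickling
# round-trip above matters because checkpoint utilities serialize prepared
# objects, e.g.:
#
#   import pickle
#   state = pickle.dumps(optimizer)   # must not raise after accelerator.prepare(...)
#   optimizer = pickle.loads(state)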
| 317
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__A = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    """simple docstring"""

    model_input_names = ['input_features', 'is_longer']
    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1_024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs, ) -> None:
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm=None , mel_scale='htk' , )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm='slaney' , mel_scale='slaney' , )
    def to_dict(self) -> Dict[str, Any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output['feature_extractor_type'] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform, mel_filters=None) -> np.ndarray:
        '''simple docstring'''
        log_mel_spectrogram = spectrogram(
            waveform , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel='dB' , )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames) -> np.ndarray:
        '''simple docstring'''
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink , size=[chunk_frames, 64] , mode='bilinear' , align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
    def _get_input_mel(self, waveform, max_length, truncation, padding) -> np.array:
        '''simple docstring'''
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"""data_truncating {truncation} not implemented""")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode='constant', constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(self, raw_speech, truncation=None, padding=None, max_length=None, sampling_rate=None, return_tensors=None, **kwargs, ) -> BatchFeature:
        '''simple docstring'''
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'input_features': input_mel, 'is_longer': is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
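# Usage sketch (illustrative, not part of the original file; it relies on the
# constructor defaults above -- 48 kHz input and "fusion" truncation):
#
#   import numpy as np
#   extractor = ClapFeatureExtractor()
#   waveform = np.zeros(48_000, dtype=np.float32)  # one second of silence
#   features = extractor(waveform, sampling_rate=48_000, return_tensors="np")
#   print(features["input_features"].shape, features["is_longer"])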
| 90
| 0
|
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    '''simple docstring'''
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    """simple docstring"""

    csv_file: str = field(
        metadata={"help": "The csv file to plot."} , )
    plot_along_batch: bool = field(
        default=False , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
    is_time: bool = field(
        default=False , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
    no_log_scale: bool = field(
        default=False , metadata={"help": "Disable logarithmic scale when plotting"} , )
    is_train: bool = field(
        default=False , metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        } , )
    figure_png_file: Optional[str] = field(
        default=None , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
    short_model_names: Optional[List[str]] = list_field(
        default=None , metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def can_convert_to_int(string):
    '''simple docstring'''
    try:
        int(string)
        return True
    except ValueError:
        return False
def can_convert_to_float(string):
    '''simple docstring'''
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    """simple docstring"""

    def __init__(self, args):
        """simple docstring"""
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][(int(row["batch_size"]), int(row["sequence_length"]))] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][(int(row["batch_size"]), int(row["sequence_length"]))] = float(row["result"])
    def plot(self):
        """simple docstring"""
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())
        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )
                x_axis_array = np.asarray(x_axis_array, np.int32)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array , y_axis_array , label=f"""{label_model_name} - {inner_loop_label}: {inner_loop_value}""" )
                plt.plot(x_axis_array, y_axis_array, "--")
            title_str += f""" {label_model_name} vs."""
        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"
        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    '''simple docstring'''
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
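# Example invocations (illustrative; the flags mirror the PlotArguments fields
# above, while `results.csv` and `plot_csv_file.py` are assumed names):
#
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png
#   python plot_csv_file.py --csv_file results.csv --is_time --plot_along_batch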
| 353
|
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    """simple docstring"""

    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs, ):
        """simple docstring"""
        super().__init__(features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , **kwargs )
        self.builder = Sql(
            cache_dir=cache_dir , features=features , sql=sql , con=con , **kwargs , )

    def read(self):
        """simple docstring"""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train" , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class SqlDatasetWriter:
    """simple docstring"""

    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs, ):
        """simple docstring"""
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs
    def write(self):
        """simple docstring"""
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written
    def _batch_sql(self, args):
        """simple docstring"""
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data , key=slice(offset, offset + self.batch_size) , indices=self.dataset._indices , )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)
    def _write(self, index, **to_sql_kwargs):
        """simple docstring"""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
                    written += num_rows
        return written
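# Usage sketch (illustrative, not part of the original module; the table name
# and the SQLite URI are hypothetical):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   SqlDatasetWriter(ds, "my_table", "sqlite:///my.db").write()
#   ds2 = SqlDatasetReader("SELECT * FROM my_table", "sqlite:///my.db").read()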
| 133
| 0
|
'''simple docstring'''
import argparse
import os
import re
_UpperCAmelCase : Tuple = """src/transformers"""
# Pattern that looks at the indentation in a line.
_UpperCAmelCase : Any = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_UpperCAmelCase : List[Any] = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_UpperCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_UpperCAmelCase : Tuple = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_UpperCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""")
def get_indent(line):
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def __magic_name__( lowerCamelCase, lowerCamelCase="", lowerCamelCase=None, lowerCamelCase=None):
__lowerCAmelCase = 0
__lowerCAmelCase = code.split('''\n''')
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase):
index += 1
__lowerCAmelCase = ['''\n'''.join(lines[:index])]
else:
__lowerCAmelCase = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__lowerCAmelCase = [lines[index]]
index += 1
while index < len(lowerCamelCase) and (end_prompt is None or not lines[index].startswith(lowerCamelCase)):
if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
if len(lowerCamelCase) > 0 and get_indent(current_block[-1]).startswith(indent_level + ''' '''):
current_block.append(lines[index])
blocks.append('''\n'''.join(lowerCamelCase))
if index < len(lowerCamelCase) - 1:
__lowerCAmelCase = [lines[index + 1]]
index += 1
else:
__lowerCAmelCase = []
else:
blocks.append('''\n'''.join(lowerCamelCase))
__lowerCAmelCase = [lines[index]]
else:
current_block.append(lines[index])
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase) > 0:
blocks.append('''\n'''.join(lowerCamelCase))
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase):
blocks.append('''\n'''.join(lines[index:]))
return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace('''_''', '''''')

    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return F"""[{imports}]"""
        keys = [part.strip().replace('''"''', '''''') for part in imports.split(''',''')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(keys)]) + "]"

    lines = import_statement.split('''\n''')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '''[''' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('''"''', '''''') for part in lines[1].split(''',''')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    with open(file, encoding='''utf-8''') as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='''_import_structure = {''', end_prompt='''if TYPE_CHECKING:''')
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('''\n''')
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '''\n'''.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(F"""Overwriting {file}.""")
            with open(file, '''w''', encoding='''utf-8''') as f:
                f.write('''\n'''.join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '''__init__.py'''), check_only=check_only)
            if result:
                failures = [os.path.join(root, '''__init__.py''')]
    if len(failures) > 0:
        raise ValueError(F"""Would overwrite {len(failures)} files, run `make style`.""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
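# Example invocations (illustrative; the file name is an assumption): in check
# mode the script raises if any __init__.py has unsorted import structures,
# while the default mode rewrites the files in place:
#
#   python custom_init_isort.py --check_only
#   python custom_init_isort.py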
| 174
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=True , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=32 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''text_encoder_2''': text_encoder_2,
            '''tokenizer_2''': tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 5.0,
            '''output_type''': '''numpy''',
            '''strength''': 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ['''this is a negative prompt''']
        inputs['''negative_prompt'''] = negative_prompt
        inputs['''prompt'''] = 3 * [inputs['''prompt''']]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ['''this is a negative prompt''']
        prompt = 3 * [inputs.pop('''prompt''')]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusion2PipelineSlowTests(unittest.TestCase):
    """simple docstring"""
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def _snake_case (self , __lowercase , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=0 ):
__lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCAmelCase = np.random.RandomState(__lowercase ).standard_normal((1, 4, 64, 64) )
__lowerCAmelCase = torch.from_numpy(__lowercase ).to(device=__lowercase , dtype=__lowercase )
__lowerCAmelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
    def test_stable_diffusion_default_ddim(self):
        pipe = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
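# Note (illustrative, not part of the original file): the @slow tests above are
# skipped by default; in the diffusers CI they are enabled with the RUN_SLOW
# environment variable, e.g. (test path is an assumption):
#
#   RUN_SLOW=1 pytest tests/pipelines/stable_diffusion_xl -k "img2img" -x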
| 174
| 1
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
# TODO: upload to AWS
__lowerCAmelCase = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = 'retribert'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
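# Usage sketch (illustrative, not part of the original file): configs can be
# created with overrides and round-tripped through save/load (the directory
# name below is hypothetical):
#
#   config = RetriBertConfig(projection_dim=256)
#   config.save_pretrained("retribert-config")
#   config = RetriBertConfig.from_pretrained("retribert-config")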
| 107
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = 'distilbert'
    attribute_map = {
        'hidden_size': 'dim',
        'num_attention_heads': 'n_heads',
        'num_hidden_layers': 'n_layers',
    }

    def __init__(self, vocab_size=30522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6, n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1, activation="gelu", initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
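# Usage sketch (illustrative, not part of the original file): the attribute_map
# above lets the generic config names resolve to DistilBERT's own field names:
#
#   config = DistilBertConfig(n_layers=3)
#   assert config.num_hidden_layers == 3  # read through the attribute_map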
| 107
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 224
|
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
lowercase__ : List[str] = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def handle_test_results(test_results):
    """simple docstring"""
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    """simple docstring"""
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(R"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    """simple docstring"""

    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        self.thread_ts = None
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return F"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
F" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": F"The following examples had failures:\n\n\n{report}\n",
            },
        }
    @property
    def payload(self) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text="There was an issue running the tests.", blocks=payload, )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = F"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], blocks=self.payload, text=text, )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += F"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")
        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = F"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text=F"Results for {job}", blocks=blocks, thread_ts=self.thread_ts["ts"], )
                time.sleep(1)
def get_job_links():
    """simple docstring"""
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
        return {}
def retrieve_artifact(name: str) -> dict:
    """Read every file of a downloaded artifact directory into a dict keyed by file stem."""
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
    return _artifact
def retrieve_available_artifacts() -> dict:
    """List the artifact directories available in the current working directory."""

    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
lowercase__ : Optional[int] = get_job_links()
lowercase__ : Any = retrieve_available_artifacts()
lowercase__ : str = collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
lowercase__ : Dict = {
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
lowercase__ : str = github_actions_job_links.get("""run_doctests""")
lowercase__ : int = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
lowercase__ : List[str] = retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
lowercase__ , lowercase__ , lowercase__ : str = handle_test_results(artifact["""stats"""])
lowercase__ : Any = failed
lowercase__ : str = success
lowercase__ : int = time_spent[1:-1] + """, """
lowercase__ : Tuple = extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
lowercase__ : List[str] = line.replace("""FAILED """, """""")
lowercase__ : Union[str, Any] = line.split()[0].replace("""\n""", """""")
if "::" in line:
lowercase__ , lowercase__ : Optional[Any] = line.split("""::""")
else:
lowercase__ , lowercase__ : int = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
lowercase__ : str = docs[file_regex]
doc_test_results[category]["failed"].append(test)
lowercase__ : List[Any] = all_failures[test] if test in all_failures else """N/A"""
lowercase__ : List[Any] = failure
break
lowercase__ : Union[str, Any] = Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as strings: 1, 1/2, 1/3, ..."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
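    # Quick self-check (added for illustration; expected values follow directly
    # from the loop above: the first term is "1", then "1/2", "1/3", ...):
    assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]
    assert harmonic_series("") == []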
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
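# Note on the pattern above (illustrative comment, not part of the original module):
# once `sys.modules[__name__]` is replaced by the `_LazyModule`, an attribute access
# such as `transformers.models.luke.LukeModel` triggers the real import, so the
# torch-backed modeling code is loaded only on first use.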
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """Compute the fixed monthly payment (EMI) for a loan."""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
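    # A quick worked example (hypothetical loan figures, added for illustration):
    # 100_000 borrowed at 10% per annum over 2 years
    # -> rate_per_month = 0.10 / 12, number_of_payments = 24.
    emi = equated_monthly_installments(100_000, 0.10, 2)
    print(f"EMI: {emi:.2f}")  # roughly 4614.49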
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10_001) -> int:
    """Return the nth prime number."""
    count = 0
    number = 1
    # Handle 2 and 3 first, then step by 2 so only odd candidates are tested.
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f'''{solution() = }''')
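    # Sanity check of the 6k +/- 1 shortcut against naive trial division.
    # `naive_is_prime` is a throwaway reference helper added for illustration,
    # not part of the original file.
    def naive_is_prime(n: int) -> bool:
        return n > 1 and all(n % d for d in range(2, n))

    assert all(is_prime(n) == naive_is_prime(n) for n in range(200))
    print(solution(6))  # 13, the sixth prime: 2, 3, 5, 7, 11, 13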
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    # MNIST files store counts and sizes as big-endian 32-bit unsigned integers.
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(_lowerCAmelCase , "Please use tf.data to implement this functionality." )
def A_ ( _lowerCAmelCase ) -> int:
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=_lowerCAmelCase ) as bytestream:
UpperCamelCase : Dict = _readaa(_lowerCAmelCase )
if magic != 2051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
UpperCamelCase : Optional[int] = _readaa(_lowerCAmelCase )
UpperCamelCase : int = _readaa(_lowerCAmelCase )
UpperCamelCase : Union[str, Any] = _readaa(_lowerCAmelCase )
UpperCamelCase : List[Any] = bytestream.read(rows * cols * num_images )
UpperCamelCase : List[str] = numpy.frombuffer(_lowerCAmelCase , dtype=numpy.uinta )
UpperCamelCase : Optional[Any] = data.reshape(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 1 )
return data
@deprecated(_lowerCAmelCase , "Please use tf.one_hot on tensors." )
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
UpperCamelCase : List[str] = labels_dense.shape[0]
UpperCamelCase : str = numpy.arange(_lowerCAmelCase ) * num_classes
UpperCamelCase : Optional[Any] = numpy.zeros((num_labels, num_classes) )
UpperCamelCase : Dict = 1
return labels_one_hot
@deprecated(_lowerCAmelCase , "Please use tf.data to implement this functionality." )
def A_ ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=10 ) -> str:
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=_lowerCAmelCase ) as bytestream:
UpperCamelCase : int = _readaa(_lowerCAmelCase )
if magic != 2049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
UpperCamelCase : List[str] = _readaa(_lowerCAmelCase )
UpperCamelCase : List[Any] = bytestream.read(_lowerCAmelCase )
UpperCamelCase : List[str] = numpy.frombuffer(_lowerCAmelCase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_lowerCAmelCase , _lowerCAmelCase )
return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(_lowerCAmelCase , "Please write your own downloading logic." )
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
if not gfile.Exists(_lowerCAmelCase ):
gfile.MakeDirs(_lowerCAmelCase )
UpperCamelCase : Optional[Any] = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
if not gfile.Exists(_lowerCAmelCase ):
urllib.request.urlretrieve(_lowerCAmelCase , _lowerCAmelCase ) # noqa: S310
with gfile.GFile(_lowerCAmelCase ) as f:
UpperCamelCase : Optional[int] = f.size()
print("Successfully downloaded" , _lowerCAmelCase , _lowerCAmelCase , "bytes." )
return filepath
@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"
    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
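# Minimal usage sketch, added for illustration (assumes this file is importable
# as `mnist_input_data`; the module name and data directory are hypothetical):
#
#   import mnist_input_data
#   data = mnist_input_data.read_data_sets("./mnist_data", one_hot=True)
#   images, labels = data.train.next_batch(32)
#   print(images.shape, labels.shape)  # (32, 784) (32, 10)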
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes,
            depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels,
            out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
import os

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
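if __name__ == "__main__":
    # Illustrative smoke check of the defaults, added for this write-up only
    # (assumes the surrounding package is importable; not part of the original module).
    config = LevitConfig()
    print(config.model_type)   # levit
    print(config.down_ops[0])  # ['Subsample', 16, 8, 4, 2, 2] since 128 // 16 == 8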
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class ControlNetImg2ImgMultiPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))
        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6,
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowercase_ = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
lowercase_ = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test)
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test)
        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(bert_test)
        blip_model_test_mapping = get_model_to_test_mapping(blip_test)
        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 20
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal score reachable from this node with optimal play by both sides."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
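# Added worked example (not in the original module): for the eight leaf scores
# used in main() below, [90, 23, 6, 33, 21, 65, 123, 34423], the tree height is
# log2(8) = 3 and minimax(0, 0, True, scores, 3) returns 65:
# depth 2 (max): 90, 33, 65, 34423 -> depth 1 (min): 33, 65 -> depth 0 (max): 65.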
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 20
| 1
|
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
        default=512,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    def parse_bool(string: str) -> bool:
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
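    # Usage note (added): argparse calls parse_bool via `type=`, so only the
    # literal strings "True"/"False" are accepted, e.g. parse_bool("True") -> True,
    # while parse_bool("yes") raises ValueError.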
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 316
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the word's letters sorted alphabetically, as an anagram signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the word list sharing the given word's signature."""
    return word_by_signature[signature(my_word)]
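# Added illustration (not in the original script): signature("eat") == "aet", the
# same signature as "ate", "eta" and "tea", so all four words land in one bucket
# of word_by_signature and are reported as anagrams of each other.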
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
| 94
| 0
|
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
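# Added note: the strided windowing below advances each feature's start by
# MAX_LENGTH - DOC_STRIDE (= 2048) tokens while features hold up to MAX_LENGTH
# tokens, so consecutive windows overlap and an answer span near a window
# boundary is still fully contained in at least one feature.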
def _get_single_answer(example) -> dict:
    """Collapse the Natural Questions annotations of one example into a single answer dict."""

    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
                a = {k: [a[k]] for k in a}
if len(a["""start_token"""] ) > 0:
break
return a
    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)
    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False
    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])
return answer
def get_context_and_ans(example, assertion=False) -> dict:
    """Give new context after removing HTML tags, with the answer span remapped."""
    answer = _get_single_answer(example)
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__snake_case : Optional[Any] = example["""document"""]["""tokens"""]
__snake_case : Union[str, Any] = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
return {
"context": " ".join(_lowerCamelCase ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10
    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]
    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])
    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")
    return {
        "context": " ".join(context),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True) -> dict:
    """Tokenize question+context and split into overlapping strides with aligned labels."""
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1
    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }
    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )
    answer["start_token"] += q_len
    answer["end_token"] += q_len
    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1
    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]
    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")
    if len(input_ids) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"
        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]
        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False) -> dict:
    """datasets.map-compatible wrapper around get_strided_contexts_and_ans."""
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion,
    )
    return example


def save_to_disk(hf_data, file_name) -> None:
    """Stream the processed features to a JSON-lines file, skipping unusable samples."""
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
for ids, start, end, cat in zip(
example["""input_ids"""] , labels["""start_token"""] , labels["""end_token"""] , labels["""category"""] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
"""input_ids""": ids,
"""start_token""": start,
"""end_token""": end,
"""category""": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    data = data["train" if PROCESS_TRAIN == "true" else "validation"]
    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)
    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
| 13
|
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    """A distributed retriever built on top of the torch.distributed communication package."""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
        # distributed training
        world_size = dist.get_world_size(group=self.process_group)
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
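# Minimal usage sketch (added; assumes config, tokenizers and index were built
# elsewhere, e.g. from a RagConfig and a legacy/custom index):
#
#   retriever = RagPyTorchDistributedRetriever(
#       config, question_encoder_tokenizer, generator_tokenizer, index=index
#   )
#   retriever.init_retrieval(distributed_port=29501)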
| 13
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
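# Added illustration: with a 12-layer config, the first entry produced above is
# ("blocks.0.norm1.weight", "deit.encoder.layer.0.layernorm_before.weight"),
# mapping a timm parameter name to its HuggingFace counterpart.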
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # We will verify our results on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the timm model's weights into our DeiT structure."""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
# size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
# load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowercase, outputs.logits, atol=1e-3 )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 66
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count string for a Google Scholar lookup."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
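# Added note: on a Google Scholar result the third anchor in the "gs_fl" footer is
# usually the citation counter, so the function returns a string such as
# "Cited by 123" (this depends on the current page layout and may change).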
if __name__ == "__main__":
    params = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
        'year': 2018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 3
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" RoFormer tokenizer backed by the *tokenizers* library, using a Jieba pre-tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]",
        pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
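# Example usage (added sketch; the Jieba pre-tokenizer requires the `rjieba`
# package to be installed):
#
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tokenizer.tokenize("今天天气非常好。")  # -> ['今天', '天气', '非常', '好', '。']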
| 367
|
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
__version__ = "3.0.12"
_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired within *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """Context-manager proxy returned by BaseFileLock.acquire()."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
                    time.sleep(poll_intervall)
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
raise
return _Acquire_ReturnProxy(lock=self )
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
return None
    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the msvcrt.locking() function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the fcntl.flock() function to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("only soft file lock is available")
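# Example usage (added sketch): FileLock resolves to the platform-appropriate
# implementation above, and acquisition is re-entrant via the lock counter.
#
#   lock = FileLock("high_ground.txt.lock", timeout=1)
#   with lock:
#       with open("high_ground.txt", "a") as f:
#           f.write("hello")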
| 181
| 0
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
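# Added note: get_writer_batch_size picks a smaller Parquet row-group size for
# media-heavy features (image/audio) so readers can stream row groups without
# materializing huge batches; plain scalar features return None (library default).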
| 182
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
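# Added note: this TableFormat renders tabulate output as bare pipe-separated rows
# with no horizontal rules, which displays cleanly inside Slack code blocks.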
failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'emoji': True,
},
}
]
total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, 'r') as f:
        for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = f'{line["duration"]:.4f}'
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
log.unlink()
message = ''
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
for test in failed_tests:
                data = test[0].split('::')
                data[0] = data[0].split('/')[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 3000 - offset] + f'\n...\n```\n{err}'
print(f"""### {message}""")
else:
    message = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
        md_report = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
        action_button = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
        date_report = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
        response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
        ts = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
                test_class = ''
for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = ''
                payload = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
| 133
| 0
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 344
|
import functools
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
UpperCAmelCase_ = len(__UpperCAmelCase )
UpperCAmelCase_ = len(__UpperCAmelCase )
@functools.cache
def min_distance(__UpperCAmelCase , __UpperCAmelCase ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
UpperCAmelCase_ = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , __UpperCAmelCase ) , 1 + min_distance(__UpperCAmelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
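# Hedged usage sketch (not from the original file): the collapsed placeholder
# names above stand for two distinct words and two distinct indices
# (word1/word2, index1/index2). Under that reading, the memoized recursion
# computes the Levenshtein edit distance, e.g.:
#
#     >>> A("kitten", "sitting")
#     3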
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344
| 1
|
def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Any ):
"""simple docstring"""
a :Union[str, Any] = [1]
for i in range(2 , UpperCAmelCase_ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
a :Optional[Any] = []
a :List[Any] = list(range(UpperCAmelCase_ ) )
# Find permutation
while factorials:
a :List[Any] = factorials.pop()
a , a :List[str] = divmod(UpperCAmelCase_ , UpperCAmelCase_ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
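# Usage sketch (hypothetical; the def and parameter names above are placeholder
# obfuscations, conventionally read as kth_permutation(k, n)): the function
# returns the k-th permutation of range(n) in factorial-number-system order:
#
#     >>> kth_permutation(10, 4)
#     [1, 3, 0, 2]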
if __name__ == "__main__":
import doctest
doctest.testmod()
| 94
|
def __lowerCamelCase ( UpperCAmelCase_ : int = 1000 ):
"""simple docstring"""
a , a :int = 1, 1
a :Any = 2
while True:
a :Optional[int] = 0
a :str = fa + fa
a , a :List[Any] = fa, f
index += 1
for _ in str(UpperCAmelCase_ ):
i += 1
if i == n:
break
return index
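# Quick check (comment-only sketch; the collapsed locals read as f1, f2 and f):
# the loop returns the index of the first Fibonacci term with n digits, e.g.
# solution(3) == 12 because F(12) = 144 is the first three-digit term, and the
# default n = 1000 yields 4782 (Project Euler 25).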
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 94
| 1
|
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class UpperCAmelCase_ :
'''simple docstring'''
pass
| 363
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : List[Any] = '''convbert'''
def __init__( self , _lowercase=30_522 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=1e-12 , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase=768 , _lowercase=2 , _lowercase=9 , _lowercase=1 , _lowercase=None , **_lowercase , ):
"""simple docstring"""
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase , )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = embedding_size
_lowerCAmelCase = head_ratio
_lowerCAmelCase = conv_kernel_size
_lowerCAmelCase = num_groups
_lowerCAmelCase = classifier_dropout
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _lowercase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
_lowerCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
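# Illustration (not part of the original source): for the default task the
# property above produces dynamic axes keyed by input name, roughly
#     OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                  ("attention_mask", {0: "batch", 1: "sequence"}),
#                  ("token_type_ids", {0: "batch", 1: "sequence"})])
# so the ONNX export can treat the batch and sequence dimensions as variable.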
| 229
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _snake_case :
lowerCAmelCase_ : int
lowerCAmelCase_ : TreeNode | None = None
lowerCAmelCase_ : TreeNode | None = None
_SCREAMING_SNAKE_CASE : List[str] = namedtuple("CoinsDistribResult", "moves excess")
def UpperCamelCase_( snake_case : TreeNode | None ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(snake_case : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(snake_case : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(snake_case ) != count_coins(snake_case ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(snake_case : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
snake_case_ , snake_case_ = get_distrib(node.left )
snake_case_ , snake_case_ = get_distrib(node.right )
snake_case_ = 1 - left_distrib_excess
snake_case_ = 1 - right_distrib_excess
snake_case_ = (
left_distrib_moves
+ right_distrib_moves
+ abs(snake_case )
+ abs(snake_case )
)
snake_case_ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(snake_case , snake_case )
return get_distrib(snake_case )[0]
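# Usage sketch (hypothetical; assumes the dataclass above is the `TreeNode`
# referenced in the annotations, with positional fields data/left/right): a
# root holding 3 coins with two empty leaves needs one move per leaf:
#
#     >>> UpperCamelCase_(TreeNode(3, TreeNode(0), TreeNode(0)))
#     2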
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_SCREAMING_SNAKE_CASE = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation="""relu""")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(3_2, (3, 3), activation="""relu"""))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
_SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory(
"""dataset/training_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
)
_SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory(
"""dataset/test_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img(
"""dataset/single_prediction/image.png""", target_size=(6_4, 6_4)
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image)
_SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0)
_SCREAMING_SNAKE_CASE = classifier.predict(test_image)
# training_set.class_indices
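    # Reviewer note (not in the original): a sigmoid head returns floats in
    # (0, 1), so the exact equality checks below rarely fire; thresholding,
    # e.g. `result[0][0] >= 0.5`, is the usual, more robust way to pick one of
    # the two labels.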
if result[0][0] == 0:
_SCREAMING_SNAKE_CASE = """Normal"""
if result[0][0] == 1:
_SCREAMING_SNAKE_CASE = """Abnormality detected"""
| 343
| 0
|
"""simple docstring"""
def a__ ( lowerCAmelCase__ ):
for i in range(0 , lowerCAmelCase__ ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(" " , end="" )
for _ in range(0 , i + 1 ): # printing stars
print("* " , end="" )
print()
def a__ ( lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ , 0 , -1 ):
for _ in range(lowerCAmelCase__ , 0 , -1 ): # printing stars
print("* " , end="" )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(" " , end="" )
def a__ ( lowerCAmelCase__ ):
if n <= 0:
print(" ... .... nothing printing :(" )
return
floyd(lowerCAmelCase__ ) # upper half
reverse_floyd(lowerCAmelCase__ ) # lower half
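# Example (comment-only sketch): pretty_print(3) draws the upper star triangle
# with floyd() and mirrors it with reverse_floyd(), yielding a small diamond of
# "* " cells; non-positive input falls through to the "nothing printing" notice.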
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
lowerCamelCase = 1
while K:
lowerCamelCase = int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
lowerCamelCase = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 371
|
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
lowerCamelCase = TypeVar("""_T""")
class lowercase__ ( Generic[_T] ):
'''simple docstring'''
def __init__( self : int , _UpperCAmelCase : Iterable[_T] | None = None ) -> None:
'''simple docstring'''
UpperCAmelCase_ = list(iterable or [] )
UpperCAmelCase_ = []
def __len__( self : Optional[int] ) -> int:
'''simple docstring'''
return len(self._stacka ) + len(self._stacka )
def __repr__( self : Optional[Any] ) -> str:
'''simple docstring'''
return F"""Queue({tuple(self._stacka[::-1] + self._stacka )})"""
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : _T ) -> None:
'''simple docstring'''
self._stacka.append(_UpperCAmelCase )
def lowercase__ ( self : Dict ) -> _T:
'''simple docstring'''
UpperCAmelCase_ = self._stacka.pop
UpperCAmelCase_ = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("Queue is empty" )
return self._stacka.pop()
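# Usage sketch (hypothetical; the two collapsed method names above correspond
# to the usual put/get pair of a two-stack FIFO queue, and the class reprs
# itself as Queue):
#
#     >>> q = Queue([10, 20])
#     >>> q.put(30)
#     >>> q.get()
#     10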
if __name__ == "__main__":
from doctest import testmod
testmod()
| 241
| 0
|
import collections
import os
import re
from pathlib import Path
_SCREAMING_SNAKE_CASE : int = '''src/transformers'''
# Matches is_xxx_available()
_SCREAMING_SNAKE_CASE : List[str] = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_SCREAMING_SNAKE_CASE : Any = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_SCREAMING_SNAKE_CASE : Dict = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_SCREAMING_SNAKE_CASE : Dict = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_SCREAMING_SNAKE_CASE : Tuple = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_SCREAMING_SNAKE_CASE : int = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_SCREAMING_SNAKE_CASE : Optional[int] = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_SCREAMING_SNAKE_CASE : int = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_SCREAMING_SNAKE_CASE : Tuple = re.compile(r'''^\s*try:''')
# Catches a line with else:
_SCREAMING_SNAKE_CASE : Optional[int] = re.compile(r'''^\s*else:''')
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
if _re_test_backend.search(_A ) is None:
return None
SCREAMING_SNAKE_CASE__ = [b[0] for b in _re_backend.findall(_A )]
backends.sort()
return "_and_".join(_A )
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
with open(_A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE__ = f.readlines()
SCREAMING_SNAKE_CASE__ = 0
while line_index < len(_A ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_A ):
return None
# First grab the objects without a specific backend in _import_structure
SCREAMING_SNAKE_CASE__ = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
SCREAMING_SNAKE_CASE__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_A ):
SCREAMING_SNAKE_CASE__ = _re_one_line_import_struct.search(_A ).groups()[0]
SCREAMING_SNAKE_CASE__ = re.findall(R'''\[([^\]]+)\]''' , _A )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
SCREAMING_SNAKE_CASE__ = _re_import_struct_key_value.search(_A )
if single_line_import_search is not None:
SCREAMING_SNAKE_CASE__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_A ) > 0]
objects.extend(_A )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
SCREAMING_SNAKE_CASE__ = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
SCREAMING_SNAKE_CASE__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
SCREAMING_SNAKE_CASE__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
SCREAMING_SNAKE_CASE__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
SCREAMING_SNAKE_CASE__ = lines[line_index]
if _re_import_struct_add_one.search(_A ) is not None:
objects.append(_re_import_struct_add_one.search(_A ).groups()[0] )
elif _re_import_struct_add_many.search(_A ) is not None:
SCREAMING_SNAKE_CASE__ = _re_import_struct_add_many.search(_A ).groups()[0].split(''', ''' )
SCREAMING_SNAKE_CASE__ = [obj[1:-1] for obj in imports if len(_A ) > 0]
objects.extend(_A )
elif _re_between_brackets.search(_A ) is not None:
SCREAMING_SNAKE_CASE__ = _re_between_brackets.search(_A ).groups()[0].split(''', ''' )
SCREAMING_SNAKE_CASE__ = [obj[1:-1] for obj in imports if len(_A ) > 0]
objects.extend(_A )
elif _re_quote_object.search(_A ) is not None:
objects.append(_re_quote_object.search(_A ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
SCREAMING_SNAKE_CASE__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
SCREAMING_SNAKE_CASE__ = []
while (
line_index < len(_A )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
SCREAMING_SNAKE_CASE__ = lines[line_index]
SCREAMING_SNAKE_CASE__ = _re_import.search(_A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
SCREAMING_SNAKE_CASE__ = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(_A ):
# If the line is an if is_backend_available, we grab all objects associated.
SCREAMING_SNAKE_CASE__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
SCREAMING_SNAKE_CASE__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
SCREAMING_SNAKE_CASE__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
SCREAMING_SNAKE_CASE__ = lines[line_index]
SCREAMING_SNAKE_CASE__ = _re_import.search(_A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
SCREAMING_SNAKE_CASE__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
def find_duplicates(_A ):
return [k for k, v in collections.Counter(_A ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
SCREAMING_SNAKE_CASE__ = []
for key in import_dict_objects.keys():
SCREAMING_SNAKE_CASE__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
SCREAMING_SNAKE_CASE__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
SCREAMING_SNAKE_CASE__ = '''base imports''' if key == '''none''' else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
for root, _, files in os.walk(_A ):
if "__init__.py" in files:
SCREAMING_SNAKE_CASE__ = os.path.join(_A , '''__init__.py''' )
SCREAMING_SNAKE_CASE__ = parse_init(_A )
if objects is not None:
SCREAMING_SNAKE_CASE__ = analyze_results(*_A )
if len(_A ) > 0:
SCREAMING_SNAKE_CASE__ = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('''\n'''.join(_A ) )
if len(_A ) > 0:
raise ValueError('''\n\n'''.join(_A ) )
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
for path, directories, files in os.walk(_A ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(_A )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_A ) / folder).glob('''*.py''' ) ) ) == 0:
continue
SCREAMING_SNAKE_CASE__ = str((Path(_A ) / folder).relative_to(_A ) )
SCREAMING_SNAKE_CASE__ = short_path.replace(os.path.sep , '''.''' )
submodules.append(_A )
for fname in files:
if fname == "__init__.py":
continue
SCREAMING_SNAKE_CASE__ = str((Path(_A ) / fname).relative_to(_A ) )
SCREAMING_SNAKE_CASE__ = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(_A )
return submodules
_SCREAMING_SNAKE_CASE : List[Any] = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def UpperCAmelCase_ ( ):
'''simple docstring'''
from transformers.utils import direct_transformers_import
SCREAMING_SNAKE_CASE__ = direct_transformers_import(_A )
SCREAMING_SNAKE_CASE__ = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
    # (potentially re-)add them.
with open(os.path.join(_A , '''__init__.py''' ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , _A ) ) )
SCREAMING_SNAKE_CASE__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(_A ) > 0:
SCREAMING_SNAKE_CASE__ = '''\n'''.join(F'''- {module}''' for module in module_not_registered )
raise ValueError(
        '''The following submodules are not properly registered in the main init of Transformers:\n'''
F'''{list_of_modules}\n'''
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 314
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Dict , **__lowerCamelCase : Dict ) -> Union[str, Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Optional[Any] , **__lowerCamelCase : Dict ) -> int:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : str ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(__lowerCamelCase ):
processor()
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 314
| 1
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCAmelCase_ (lowerCAmelCase__: Optional[int] ):
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ():
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ():
"""simple docstring"""
UpperCAmelCase_: List[Any] = """mock-s3-bucket"""
UpperCAmelCase_: str = F's3://{mock_bucket}'
UpperCAmelCase_: Any = extract_path_from_uri(lowerCAmelCase__ )
assert dataset_path.startswith("""s3://""" ) is False
UpperCAmelCase_: Tuple = """./local/path"""
UpperCAmelCase_: Any = extract_path_from_uri(lowerCAmelCase__ )
assert dataset_path == new_dataset_path
def lowerCAmelCase_ (lowerCAmelCase__: Dict ):
"""simple docstring"""
UpperCAmelCase_: int = is_remote_filesystem(lowerCAmelCase__ )
assert is_remote is True
UpperCAmelCase_: Optional[Any] = fsspec.filesystem("""file""" )
UpperCAmelCase_: int = is_remote_filesystem(lowerCAmelCase__ )
assert is_remote is False
@pytest.mark.parametrize("""compression_fs_class""" , lowerCAmelCase__ )
def lowerCAmelCase_ (lowerCAmelCase__: Tuple , lowerCAmelCase__: List[str] , lowerCAmelCase__: Dict , lowerCAmelCase__: List[Any] , lowerCAmelCase__: Optional[Any] , lowerCAmelCase__: List[Any] , lowerCAmelCase__: Tuple ):
"""simple docstring"""
UpperCAmelCase_: int = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file}
UpperCAmelCase_: Dict = input_paths[compression_fs_class.protocol]
if input_path is None:
UpperCAmelCase_: str = F'for \'{compression_fs_class.protocol}\' compression protocol, '
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCAmelCase__ )
UpperCAmelCase_: Optional[int] = fsspec.filesystem(compression_fs_class.protocol , fo=lowerCAmelCase__ )
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_: Optional[Any] = os.path.basename(lowerCAmelCase__ )
UpperCAmelCase_: Optional[Any] = expected_filename[: expected_filename.rindex(""".""" )]
assert fs.glob("""*""" ) == [expected_filename]
with fs.open(lowerCAmelCase__ , """r""" , encoding="""utf-8""" ) as f, open(lowerCAmelCase__ , encoding="""utf-8""" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""] )
def lowerCAmelCase_ (lowerCAmelCase__: int , lowerCAmelCase__: Tuple , lowerCAmelCase__: Any ):
"""simple docstring"""
UpperCAmelCase_: List[str] = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path}
UpperCAmelCase_: Tuple = compressed_file_paths[protocol]
UpperCAmelCase_: int = """dataset.jsonl"""
UpperCAmelCase_: Any = F'{protocol}://{member_file_path}::{compressed_file_path}'
UpperCAmelCase_ , *UpperCAmelCase_: Dict = fsspec.get_fs_token_paths(lowerCAmelCase__ )
assert fs.isfile(lowerCAmelCase__ )
assert not fs.isfile("""non_existing_""" + member_file_path )
@pytest.mark.integration
def lowerCAmelCase_ (lowerCAmelCase__: Any , lowerCAmelCase__: List[Any] , lowerCAmelCase__: List[str] , lowerCAmelCase__: Optional[Any] ):
"""simple docstring"""
UpperCAmelCase_: Tuple = hf_api.dataset_info(lowerCAmelCase__ , token=lowerCAmelCase__ )
UpperCAmelCase_: List[str] = HfFileSystem(repo_info=lowerCAmelCase__ , token=lowerCAmelCase__ )
assert sorted(hffs.glob("""*""" ) ) == [".gitattributes", "data"]
assert hffs.isdir("""data""" )
assert hffs.isfile(""".gitattributes""" ) and hffs.isfile("""data/text_data.txt""" )
with open(lowerCAmelCase__ ) as f:
assert hffs.open("""data/text_data.txt""" , """r""" ).read() == f.read()
def lowerCAmelCase_ ():
"""simple docstring"""
UpperCAmelCase_: List[str] = """bz2"""
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(lowerCAmelCase__ , lowerCAmelCase__ , clobber=lowerCAmelCase__ )
with pytest.warns(lowerCAmelCase__ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(lowerCAmelCase__ ) == 1
assert (
str(warning_info[0].message )
== F'A filesystem protocol was already set for {protocol} and will be overwritten.'
)
| 82
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCAmelCase_ (lowerCAmelCase__: Optional[int] ):
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ():
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ():
"""simple docstring"""
UpperCAmelCase_: List[Any] = """mock-s3-bucket"""
UpperCAmelCase_: str = F's3://{mock_bucket}'
UpperCAmelCase_: Any = extract_path_from_uri(lowerCAmelCase__ )
assert dataset_path.startswith("""s3://""" ) is False
UpperCAmelCase_: Tuple = """./local/path"""
UpperCAmelCase_: Any = extract_path_from_uri(lowerCAmelCase__ )
assert dataset_path == new_dataset_path
def lowerCAmelCase_ (lowerCAmelCase__: Dict ):
"""simple docstring"""
UpperCAmelCase_: int = is_remote_filesystem(lowerCAmelCase__ )
assert is_remote is True
UpperCAmelCase_: Optional[Any] = fsspec.filesystem("""file""" )
UpperCAmelCase_: int = is_remote_filesystem(lowerCAmelCase__ )
assert is_remote is False
@pytest.mark.parametrize("""compression_fs_class""" , lowerCAmelCase__ )
def lowerCAmelCase_ (lowerCAmelCase__: Tuple , lowerCAmelCase__: List[str] , lowerCAmelCase__: Dict , lowerCAmelCase__: List[Any] , lowerCAmelCase__: Optional[Any] , lowerCAmelCase__: List[Any] , lowerCAmelCase__: Tuple ):
"""simple docstring"""
UpperCAmelCase_: int = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file}
UpperCAmelCase_: Dict = input_paths[compression_fs_class.protocol]
if input_path is None:
UpperCAmelCase_: str = F'for \'{compression_fs_class.protocol}\' compression protocol, '
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCAmelCase__ )
UpperCAmelCase_: Optional[int] = fsspec.filesystem(compression_fs_class.protocol , fo=lowerCAmelCase__ )
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_: Optional[Any] = os.path.basename(lowerCAmelCase__ )
UpperCAmelCase_: Optional[Any] = expected_filename[: expected_filename.rindex(""".""" )]
assert fs.glob("""*""" ) == [expected_filename]
with fs.open(lowerCAmelCase__ , """r""" , encoding="""utf-8""" ) as f, open(lowerCAmelCase__ , encoding="""utf-8""" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""] )
def lowerCAmelCase_ (lowerCAmelCase__: int , lowerCAmelCase__: Tuple , lowerCAmelCase__: Any ):
"""simple docstring"""
UpperCAmelCase_: List[str] = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path}
UpperCAmelCase_: Tuple = compressed_file_paths[protocol]
UpperCAmelCase_: int = """dataset.jsonl"""
UpperCAmelCase_: Any = F'{protocol}://{member_file_path}::{compressed_file_path}'
UpperCAmelCase_ , *UpperCAmelCase_: Dict = fsspec.get_fs_token_paths(lowerCAmelCase__ )
assert fs.isfile(lowerCAmelCase__ )
assert not fs.isfile("""non_existing_""" + member_file_path )
@pytest.mark.integration
def lowerCAmelCase_ (lowerCAmelCase__: Any , lowerCAmelCase__: List[Any] , lowerCAmelCase__: List[str] , lowerCAmelCase__: Optional[Any] ):
"""simple docstring"""
UpperCAmelCase_: Tuple = hf_api.dataset_info(lowerCAmelCase__ , token=lowerCAmelCase__ )
UpperCAmelCase_: List[str] = HfFileSystem(repo_info=lowerCAmelCase__ , token=lowerCAmelCase__ )
assert sorted(hffs.glob("""*""" ) ) == [".gitattributes", "data"]
assert hffs.isdir("""data""" )
assert hffs.isfile(""".gitattributes""" ) and hffs.isfile("""data/text_data.txt""" )
with open(lowerCAmelCase__ ) as f:
assert hffs.open("""data/text_data.txt""" , """r""" ).read() == f.read()
def lowerCAmelCase_ ():
"""simple docstring"""
UpperCAmelCase_: List[str] = """bz2"""
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(lowerCAmelCase__ , lowerCAmelCase__ , clobber=lowerCAmelCase__ )
with pytest.warns(lowerCAmelCase__ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(lowerCAmelCase__ ) == 1
assert (
str(warning_info[0].message )
== F'A filesystem protocol was already set for {protocol} and will be overwritten.'
)
| 82
| 1
|
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Dict ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : Tuple ) -> None:
'''simple docstring'''
warnings.warn(
"""The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use FlavaImageProcessor instead.""" ,lowerCamelCase__ ,)
super().__init__(*lowerCamelCase__ ,**lowerCamelCase__ )
| 296
|
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
@dataclass
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[float] = field(
default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
__snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to SortishSamler or not."} )
__snake_case : bool = field(
default=lowerCAmelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
__snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "whether to use adafactor"} )
__snake_case : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
__snake_case : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
__snake_case : Optional[float] = field(default=lowerCAmelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} )
__snake_case : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} )
__snake_case : Optional[str] = field(
default="linear" , metadata={"help": F"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
| 296
| 1
|
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
UpperCAmelCase =logging.getLogger(__name__)
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
_lowerCamelCase = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_lowerCamelCase = field(
default=_UpperCAmelCase , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
_lowerCamelCase = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
_lowerCamelCase = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
_lowerCamelCase = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
_lowerCamelCase = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
} , )
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
_lowerCamelCase = field(
default=_UpperCAmelCase , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_lowerCamelCase = field(
default=_UpperCAmelCase , metadata={'''help''': '''Evaluation language. Also train language if `train_language` is set to None.'''} )
_lowerCamelCase = field(
default=_UpperCAmelCase , metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
_lowerCamelCase = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_lowerCamelCase = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_lowerCamelCase = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_lowerCamelCase = field(
default=_UpperCAmelCase , metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''} , )
_lowerCamelCase = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
_lowerCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
_lowerCamelCase = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
_lowerCamelCase = field(
default=_UpperCAmelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def _A ( ):
"""simple docstring"""
A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
A = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_xnli""" , UpperCAmelCase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A = training_args.get_process_log_level()
logger.setLevel(UpperCAmelCase__ )
datasets.utils.logging.set_verbosity(UpperCAmelCase__ )
transformers.utils.logging.set_verbosity(UpperCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
A = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
A = load_dataset(
"""xnli""" , model_args.language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
A = load_dataset(
"""xnli""" , model_args.train_language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
A = train_dataset.features["""label"""].names
if training_args.do_eval:
A = load_dataset(
"""xnli""" , model_args.language , split="""validation""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
A = eval_dataset.features["""label"""].names
if training_args.do_predict:
A = load_dataset(
"""xnli""" , model_args.language , split="""test""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
A = predict_dataset.features["""label"""].names
# Labels
A = len(UpperCAmelCase__ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , idalabel={str(UpperCAmelCase__ ): label for i, label in enumerate(UpperCAmelCase__ )} , labelaid={label: i for i, label in enumerate(UpperCAmelCase__ )} , finetuning_task="""xnli""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
A = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
A = False
def preprocess_function(_a : Tuple ):
# Tokenize the texts
return tokenizer(
examples["""premise"""] , examples["""hypothesis"""] , padding=UpperCAmelCase__ , max_length=data_args.max_seq_length , truncation=UpperCAmelCase__ , )
if training_args.do_train:
if data_args.max_train_samples is not None:
A = min(len(UpperCAmelCase__ ) , data_args.max_train_samples )
A = train_dataset.select(range(UpperCAmelCase__ ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
A = train_dataset.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on train dataset""" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(UpperCAmelCase__ ) ) , 3 ):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
A = min(len(UpperCAmelCase__ ) , data_args.max_eval_samples )
A = eval_dataset.select(range(UpperCAmelCase__ ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
A = eval_dataset.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on validation dataset""" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
A = min(len(UpperCAmelCase__ ) , data_args.max_predict_samples )
A = predict_dataset.select(range(UpperCAmelCase__ ) )
with training_args.main_process_first(desc="""prediction dataset map pre-processing""" ):
A = predict_dataset.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on prediction dataset""" , )
# Get the metric function
A = evaluate.load("""xnli""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_a : EvalPrediction ):
A = p.predictions[0] if isinstance(p.predictions , UpperCAmelCase__ ) else p.predictions
A = np.argmax(UpperCAmelCase__ , axis=1 )
return metric.compute(predictions=UpperCAmelCase__ , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
A = default_data_collator
elif training_args.fpaa:
A = DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 )
else:
A = None
# Initialize our Trainer
A = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
# Training
if training_args.do_train:
A = None
if training_args.resume_from_checkpoint is not None:
A = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A = last_checkpoint
A = trainer.train(resume_from_checkpoint=UpperCAmelCase__ )
A = train_result.metrics
A = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCAmelCase__ )
)
A = min(UpperCAmelCase__ , len(UpperCAmelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , UpperCAmelCase__ )
trainer.save_metrics("""train""" , UpperCAmelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
A = trainer.evaluate(eval_dataset=UpperCAmelCase__ )
A = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCAmelCase__ )
A = min(UpperCAmelCase__ , len(UpperCAmelCase__ ) )
trainer.log_metrics("""eval""" , UpperCAmelCase__ )
trainer.save_metrics("""eval""" , UpperCAmelCase__ )
# Prediction
if training_args.do_predict:
logger.info("""*** Predict ***""" )
A = trainer.predict(UpperCAmelCase__ , metric_key_prefix="""predict""" )
A = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(UpperCAmelCase__ )
)
A = min(UpperCAmelCase__ , len(UpperCAmelCase__ ) )
trainer.log_metrics("""predict""" , UpperCAmelCase__ )
trainer.save_metrics("""predict""" , UpperCAmelCase__ )
A = np.argmax(UpperCAmelCase__ , axis=1 )
A = os.path.join(training_args.output_dir , """predictions.txt""" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , """w""" ) as writer:
writer.write("""index\tprediction\n""" )
for index, item in enumerate(UpperCAmelCase__ ):
A = label_list[item]
writer.write(f'{index}\t{item}\n' )
if __name__ == "__main__":
main()
| 371
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCAmelCase =argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
UpperCAmelCase =parser.parse_args()
UpperCAmelCase =UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
UpperCAmelCase =CLIPImageProcessor()
UpperCAmelCase =CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
UpperCAmelCase =UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 77
| 0
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> tuple:
return (data["data"], data["target"])
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> XGBClassifier:
lowercase : Tuple = XGBClassifier()
classifier.fit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return classifier
def _snake_case( ) -> None:
lowercase : Optional[Any] = load_iris()
lowercase , lowercase : Tuple = data_handling(SCREAMING_SNAKE_CASE__ )
lowercase , lowercase , lowercase , lowercase : Tuple = train_test_split(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , test_size=0.25 )
lowercase : Union[str, Any] = iris["""target_names"""]
# Create an XGBoost Classifier from the training data
lowercase : Dict = xgboost(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , display_labels=SCREAMING_SNAKE_CASE__ , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 20
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class __snake_case :
_a : int
_a : TreeNode | None= None
_a : TreeNode | None= None
lowercase : Dict = namedtuple("""CoinsDistribResult""", """moves excess""")
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> int:
if root is None:
return 0
# Validation
def count_nodes(SCREAMING_SNAKE_CASE__ ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(SCREAMING_SNAKE_CASE__ ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(SCREAMING_SNAKE_CASE__ ) != count_coins(SCREAMING_SNAKE_CASE__ ):
raise ValueError("""The nodes number should be same as the number of coins""" )
# Main calculation
def get_distrib(SCREAMING_SNAKE_CASE__ ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
lowercase , lowercase : int = get_distrib(node.left )
lowercase , lowercase : List[Any] = get_distrib(node.right )
lowercase : Optional[Any] = 1 - left_distrib_excess
lowercase : Union[str, Any] = 1 - right_distrib_excess
lowercase : List[Any] = (
left_distrib_moves
+ right_distrib_moves
+ abs(SCREAMING_SNAKE_CASE__ )
+ abs(SCREAMING_SNAKE_CASE__ )
)
lowercase : Any = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return get_distrib(SCREAMING_SNAKE_CASE__ )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20
| 1
|
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2


# modified from https://github.com/facebookresearch/fairseq/blob/main/fairseq/data/dictionary.py
class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with the format:

        <symbol0> <count0>
        <symbol1> <count1>
        ...
        """
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """
        Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g. {'le@@': 5, 'er': 7} => {'le': 5, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
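# Hypothetical invocation (illustrative only — the script file name is an assumption;
# the dump dir layout is what the script itself expects: checkpoint.pt, dict.txt and
# bpecodes side by side):
#
#     python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#         --biogpt_checkpoint_path /path/to/biogpt_dump \
#         --pytorch_dump_folder_path /path/to/output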
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
a_ = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
a_ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
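# Quick illustrative sketch (not part of the original module): the score ranges from
# 0 to 12 — one point per ETAOIN letter that lands among the six most frequent letters
# of the message, and one per Z-Q-end letter among the six least frequent.
#
#     text = "Alan Mathison Turing was an English mathematician and computer scientist"
#     score = english_freq_match_score(text)   # 0..12; higher means more English-like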
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}


def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer


def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }


def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            # checking if the strided samples match the original answer span
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }


def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example


def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop roughly 60% of the samples without an answer
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )


if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
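# A small sketch of reading the dump back (illustrative only; "nq-validation.jsonl" is
# one of the file names the script itself writes, and each row carries the keys
# written by save_to_disk above):
#
#     import jsonlines
#     with jsonlines.open("nq-validation.jsonl") as reader:
#         for sample in reader:
#             ids = sample["input_ids"]
#             start, end = sample["start_token"], sample["end_token"]
#             category_id = sample["category"]  # int, see CATEGORY_MAPPING
#             break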
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
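# Typical ways to launch this script (illustrative; the file name is whatever this
# script is saved as — `nlp_example.py` below is an assumption). The same code runs
# unchanged under the `accelerate` launcher in distributed mode:
#
#     python nlp_example.py
#     accelerate launch nlp_example.py --mixed_precision fp16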
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
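# Hypothetical invocation (illustrative only — the script file name is an assumption;
# pass a TF checkpoint, a dataset pickle, or both — the function branches on whichever
# argument is non-empty):
#
#     python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#         --pytorch_dump_folder_path ./transfo-xl-out \
#         --transfo_xl_dataset_file ./corpus.pkl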
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2] ) )
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2] ) )
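# For reference (an illustrative note, not an additional test): the metadata dict is
# unpacked into the tokenizer call, so the equivalent direct form of the calls above is
#
#     tokens = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="...")["input_ids"]
#
# which, as the EXPECTED_OUTPUT lists show, returns three id tensors — one per
# Jukebox prior level.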
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )

        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
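# A hedged usage sketch (the model id and label strings are assumptions, not part of
# this file): labels are mapped to class ids via get_label_ids, then the pipeline is
# called with those ids.
#
#     import torch
#     from diffusers import DiTPipeline, DPMSolverMultistepScheduler
#
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
#     pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
#     pipe = pipe.to("cuda")
#
#     class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#     images = pipe(class_labels=class_ids, num_inference_steps=25).images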
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
lowercase__ = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
lowercase__ = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
lowercase__ = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
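# A runnable sketch mirroring the docstring example above ("bleu" is the metric name
# this file registers with the datasets library):
#
#     import datasets
#     bleu = datasets.load_metric("bleu")
#     predictions = [["hello", "there", "general", "kenobi"]]
#     references = [[["hello", "there", "general", "kenobi"]]]
#     print(bleu.compute(predictions=predictions, references=references)["bleu"])  # 1.0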
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """
    Return the citation number for a Google Scholar lookup.
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def snake_case__ ( self : Union[str, Any] ):
__magic_name__ = 0
def snake_case__ ( self : Optional[int] ):
__magic_name__ = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(a__ , a__ )
def snake_case__ ( self : Optional[int] ):
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
def snake_case__ ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ ).to_dict()
config_dict.pop('''feature_extractor_type''' )
__magic_name__ = WavaVecaFeatureExtractor(**a__ )
# save in new folder
model_config.save_pretrained(a__ )
config.save_pretrained(a__ )
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ )
# make sure private variable is not incorrectly saved
__magic_name__ = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(a__ , a__ )
def snake_case__ ( self : Optional[Any] ):
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
def snake_case__ ( self : str ):
with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
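
# Illustrative registration sketch (hypothetical names, not part of the test file above):
# pairing a config class with a feature-extractor class is what makes the auto-API resolve it.
#
#     from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
#
#     class MyConfig(PretrainedConfig):
#         model_type = "my-model"
#
#     AutoConfig.register("my-model", MyConfig)                     # config side
#     AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)   # extractor side
#     # AutoFeatureExtractor.from_pretrained(...) can now resolve "my-model" checkpoints.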
| 98
| 1
|
from __future__ import annotations

RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
    "A": "N",
    "N": "A",
    "B": "O",
    "O": "B",
    "C": "P",
    "P": "C",
    "D": "Q",
    "Q": "D",
    "E": "R",
    "R": "E",
    "F": "S",
    "S": "F",
    "G": "T",
    "T": "G",
    "H": "U",
    "U": "H",
    "I": "V",
    "V": "I",
    "J": "W",
    "W": "J",
    "K": "X",
    "X": "K",
    "L": "Y",
    "Y": "L",
    "M": "Z",
    "Z": "M",
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"


def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Checks rotor selection, rotor positions and the plugboard string."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    """Builds the symmetric plugboard mapping from a string of letter pairs."""
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")  # str.replace returns a new string; it must be reassigned

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Creates the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encrypts a message; running it again with identical settings decrypts it."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor1, rotor2, rotor3 = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor1[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor2[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor3[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors (inverse pass, in reverse order)
            symbol = abc[rotor3.index(symbol) - rotorpos3]
            symbol = abc[rotor2.index(symbol) - rotorpos2]
            symbol = abc[rotor1.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     an error could also be raised here:
        #     raise ValueError("Invalid symbol(" + repr(symbol) + ")")

        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
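
    # Usage sketch: because the reflector step is an involution, the whole per-symbol
    # map is self-inverse, so running enigma() twice with identical settings must
    # round-trip the message (letters only; other symbols pass through unchanged).
    settings = ((1, 17, 12), (rotor1, rotor5, rotor9), "AZBY")
    once = enigma("HELLOWORLD", *settings)
    assert enigma(once, *settings) == "HELLOWORLD"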
| 324
|
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Takes a string and returns a list of (sub-word) token strings."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
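
# Usage sketch (downloads the real checkpoint, so network access is assumed):
#
#     tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#     ids = tokenizer("Paris is the capital of France.")["input_ids"]
#     print(tokenizer.decode(ids))  # [CLS]/[SEP] are re-inserted by build_inputs_with_special_tokens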
| 324
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 65
|
def cramers_rule_2x2(equation1: list, equation2: list) -> tuple:
    """
    Solves the system  a1*x + b1*y = c1  /  a2*x + b2*y = c2  with Cramer's rule.
    Each equation is given as [a, b, c].
    """
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution: the system is consistent and x = y = 0
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-trivial solution (consistent system)
            return (x, y)
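
# Worked example: solve  2x + 3y = 6  and  x - y = 1  (coefficients passed as [a, b, c]).
# determinant   = 2*(-1) - 1*3 = -5
# determinant_x = 6*(-1) - 1*3 = -9  ->  x = -9 / -5 = 1.8
# determinant_y = 2*1    - 1*6 = -4  ->  y = -4 / -5 = 0.8
x, y = cramers_rule_2x2([2, 3, 6], [1, -1, 1])
assert (x, y) == (1.8, 0.8)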
| 65
| 1
|
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


class MaMaaaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return MaMaaaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)


@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)


def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 43
|
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks whether `number` is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
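
# Quick illustration of the 6k +/- 1 stride: the loop only probes 5, 7, 11, 13, ...
# because every prime greater than 3 is congruent to 1 or 5 modulo 6.
assert is_prime(97)       # 97 = 6 * 16 + 1
assert not is_prime(91)   # 91 = 7 * 13, caught at i = 5 via i + 2 = 7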
class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 162
| 0
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
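
# Usage sketch (checkpoint name is an assumption; any Wav2Vec2 CTC checkpoint works).
# Passing `text=` in the same call is the recommended replacement for the deprecated
# `as_target_processor` context manager above:
#
#     import numpy as np
#     from transformers import Wav2Vec2Processor
#
#     processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#     speech = np.zeros(16_000, dtype=np.float32)  # one second of silence
#     batch = processor(audio=speech, sampling_rate=16_000, text="HELLO", return_tensors="pt")
#     # batch["input_values"] -> audio features, batch["labels"] -> tokenized transcript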
| 360
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """ctrl"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : List[str] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase_=24_6534 , lowercase_=256 , lowercase_=1280 , lowercase_=8192 , lowercase_=48 , lowercase_=16 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1E-6 , lowercase_=0.02 , lowercase_=True , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : Union[str, Any] = n_positions
UpperCAmelCase_ : List[str] = n_embd
UpperCAmelCase_ : Dict = n_layer
UpperCAmelCase_ : Optional[int] = n_head
UpperCAmelCase_ : List[str] = dff
UpperCAmelCase_ : Tuple = resid_pdrop
UpperCAmelCase_ : Optional[Any] = embd_pdrop
UpperCAmelCase_ : str = layer_norm_epsilon
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : List[str] = use_cache
super().__init__(**lowercase_ )
| 23
| 0
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation before scoring (sacrebleu's `no_punct`). Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 87
|
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Sorts `arr` by repeatedly extracting monotone strands and merging them."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
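    # One more worked case: the first "strand" greedily pulled from [10, 5, 30, 22, 7]
    # is the increasing run [10, 30]; remaining strands merge into the solution list.
    assert strand_sort([10, 5, 30, 22, 7]) == [5, 7, 10, 22, 30]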
| 87
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
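
# Usage sketch (only runs inside the transformers package, since the imports above
# are relative; the `task` kwarg value shown is an assumption about OnnxConfig defaults):
#
#     config = CamembertConfig()  # defaults: 12 layers, hidden size 768
#     onnx_config = CamembertOnnxConfig(config, task="default")
#     assert list(onnx_config.inputs.keys()) == ["input_ids", "attention_mask"]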
| 359
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 84
| 0
|
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
@dataclass(frozen=snake_case__ )
class lowerCamelCase_ :
'''simple docstring'''
__UpperCamelCase: str
__UpperCamelCase: str
__UpperCamelCase: Optional[str] = None
__UpperCamelCase: Optional[str] = None
__UpperCamelCase: Optional[str] = None
@dataclass(frozen=snake_case__ )
class lowerCamelCase_ :
'''simple docstring'''
__UpperCamelCase: List[int]
__UpperCamelCase: Optional[List[int]] = None
__UpperCamelCase: Optional[List[int]] = None
__UpperCamelCase: Optional[Union[int, float]] = None
__UpperCamelCase: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[InputFeatures]
def __init__( self : Optional[Any] , A : str , A : PreTrainedTokenizer , A : str , A : Optional[int] = None , A : List[Any]=False , A : bool = False , ):
_UpperCAmelCase : Optional[int] = hans_processors[task]()
_UpperCAmelCase : int = os.path.join(
A , "cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(A ) , A , ) , )
_UpperCAmelCase : List[Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_UpperCAmelCase , _UpperCAmelCase : Tuple = label_list[2], label_list[1]
_UpperCAmelCase : Tuple = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_UpperCAmelCase : List[str] = cached_features_file + ".lock"
with FileLock(A ):
if os.path.exists(A ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
_UpperCAmelCase : str = torch.load(A )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
_UpperCAmelCase : Optional[int] = (
processor.get_dev_examples(A ) if evaluate else processor.get_train_examples(A )
)
logger.info("Training examples: %s" , len(A ) )
_UpperCAmelCase : Optional[Any] = hans_convert_examples_to_features(A , A , A , A )
logger.info("Saving features into cached file %s" , A )
torch.save(self.features , A )
def __len__( self : Tuple ):
return len(self.features )
def __getitem__( self : Optional[int] , A : str ):
return self.features[i]
    def get_labels(self):
        return self.label_list


if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        """TensorFlow dataset wrapping the HANS features (the tokenizer classes, InputExample,
        InputFeatures, DataProcessor, tqdm and logger are imported at the top of this file)."""

        features: List[InputFeatures]

        def __init__(
            self,
            task: str,
            tokenizer: PreTrainedTokenizer,
            data_dir: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list


class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """Note that the HANS evaluation groups `contradiction` and `neutral` into `non-entailment`."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
) -> List[InputFeatures]:
    """Converts a list of ``InputExample``s into ``InputFeatures`` the model can consume."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
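
# Usage sketch (added for illustration, not part of the original module). The data
# directory and the tokenizer choice are assumptions; any HANS checkout containing
# the two heuristics tsv files will do:
#
#   from transformers import BertTokenizer
#
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   hans = TFHansDataset("hans", tokenizer, "hans_data", max_seq_length=128, evaluate=True)
#   tf_dataset = hans.get_dataset().batch(32)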
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
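
# Usage sketch (illustrative, not from the original file): wires the trainer to a
# SQuAD-style post-processing step. `post_processing_function` and `compute_metrics`
# are assumed to be defined elsewhere (e.g. in utils_qa.py in the same example folder):
#
#   trainer = QuestionAnsweringTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       tokenizer=tokenizer,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate()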
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    """Runs operation on the object and returns a (result, exception) tuple."""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
def simplify(current_set: list[list]) -> list[list]:
    """Reduce an augmented coefficient matrix one column at a time (recursively)."""
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix.
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract the first row from every other row to cancel the leading term.
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve n simultaneous linear equations, each given as n coefficients plus a constant term."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # Move an equation with no zero coefficients to the front, if one is needed.
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    # Back-substitute from the last (fully reduced) row upwards.
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
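
# Expected output of the demo above (a hand check: the coefficient matrix is the
# identity plus the all-ones matrix, so sum(x) = 5 and each x_i = b_i - 5). If the
# solver is working, it should print:
#   [-1.0, 0.0, 1.0, 2.0, 3.0]
#   [0.5]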
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    r"""
    Constructs an InstructBLIP processor which wraps a BLIP image processor, a language-model
    tokenizer and a Q-Former tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            # Both tokenizers receive exactly the same arguments.
            tokenizer_kwargs = dict(
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            text_encoding = self.tokenizer(text=text, **tokenizer_kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, **tokenizer_kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwritten to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwritten to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
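
# Usage sketch (illustrative; the checkpoint name is an assumption):
#
#   from PIL import Image
#
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=Image.open("photo.jpg"),
#                      text="What is unusual about this image?", return_tensors="pt")
#   # `inputs` now holds pixel_values, input_ids/attention_mask from the main tokenizer,
#   # and qformer_input_ids/qformer_attention_mask from the Q-Former tokenizer.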
"""simple docstring"""
import contextlib
import os
import sqlite3

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy


def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper class for Multi-ControlNet: each conditioning image is run
    through its own ControlNet and the residuals are summed.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
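
# Loading/saving layout sketch (illustrative paths). `from_pretrained` keeps loading
# sibling directories until the first missing one:
#
#   nets = MultiControlNetModel.from_pretrained("./my_pipeline/controlnet")
#   # reads ./my_pipeline/controlnet, ./my_pipeline/controlnet_1, ...
#   nets.save_pretrained("./exported/controlnet")
#   # writes ./exported/controlnet, ./exported/controlnet_1, ...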
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns the positional encoding (same as Tensor2Tensor) for a 1-D array of timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    """Learns an MLP embedding for input time steps."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wraps `get_sinusoidal_embeddings` as a Flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
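
# Quick shape check (illustrative):
#
#   import jax.numpy as jnp
#
#   emb = get_sinusoidal_embeddings(jnp.array([0.0, 1.0, 10.0, 100.0]), embedding_dim=8)
#   print(emb.shape)  # (4, 8)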
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split up the fused qkv matrix into separate query, key and value matrices
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original AST weights into our Audio Spectrogram Transformer structure.
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
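
# Example invocation (illustrative; the script's filename is assumed):
#
#   python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted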
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """
    Returns a Counter keyed by perimeter, counting the integer right triangles
    with that perimeter.
    """
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    """Returns the perimeter <= n with the maximum number of solutions."""
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
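
# The well-known Project Euler #39 result for the default limit of 1000 is a
# perimeter of 840, so `solution()` should return 840.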
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """
    Zero shot object detection pipeline: detects objects matching free-text candidate labels
    (only available in PyTorch).
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turns a [xmin, ymin, xmax, ymax] tensor into a dict with those keys."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
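
# Usage sketch (illustrative; google/owlvit-base-patch32 is the usual checkpoint
# for this task):
#
#   from transformers import pipeline
#
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#       threshold=0.1,
#   )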
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
__UpperCamelCase = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
__UpperCamelCase = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
__UpperCamelCase = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
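
# Sanity check against scipy directly (the metric is a thin wrapper around it);
# this matches the -0.74 shown in the docstring examples:
#
#   from scipy.stats import pearsonr as scipy_pearsonr
#
#   r, p = scipy_pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
#   print(round(r, 2))  # -0.74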
"""simple docstring"""
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Updates the stable version and the version table hard-coded in custom.js."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to DetrImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """
    Greedy fractional knapsack: items are sorted by value/weight ratio and taken
    whole until the next item no longer fits, which is then taken fractionally.

    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
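
# Worked example (matches the doctest above): sorted by value/weight the items are
# (60/10, 100/20, 120/30); the first two fit whole (weight 30 of capacity 50) for a
# value of 160, and the remaining capacity of 20 takes 20/30 of the third item:
#
#   frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)  # -> 160 + 20 * 120 / 30 = 240.0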
def dodecahedron_surface_area(edge: int) -> float:
    """Calculates the surface area 3 * sqrt(25 + 10 * sqrt(5)) * edge^2 of a regular dodecahedron."""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: int) -> float:
    """Calculates the volume ((15 + 7 * sqrt(5)) / 4) * edge^3 of a regular dodecahedron."""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
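
# Reference values for edge == 1 (rounded): surface area 3 * sqrt(25 + 10 * sqrt(5))
# is approximately 20.6457 and volume (15 + 7 * sqrt(5)) / 4 is approximately 7.6631:
#
#   print(dodecahedron_surface_area(1))  # ~20.6457
#   print(dodecahedron_volume(1))        # ~7.6631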
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_snake_case : Optional[Any] = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 123
| 1
|
"""simple docstring"""
def solution(n: int = 4_000_000) -> int:
    """Sum the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F'''{solution() = }''')
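    # Sanity check: the Fibonacci numbers up to 10 are 1, 1, 2, 3, 5, 8, so the
    # even terms sum to 2 + 8 = 10.
    assert solution(10) == 10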
| 359
|
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    """Return all diffusers releases published on PyPI, sorted by version."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    """Create the cache directory for dynamic modules and add it to the Python path."""
    # This function has already been executed if HF_MODULES_CACHE is in sys.path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    """Create a (possibly nested) dynamic module package inside the cache."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """List the relative imports (`import .xxx` / `from .xxx import yyy`) of a module file."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    """Recursively collect every file that `module_file` depends on through relative imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """Check that the current Python environment contains all the libraries imported in a file."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
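# Note: `check_imports` only validates top-level third-party imports; the relative
# imports it returns are copied next to the module by `get_cached_module_file` below.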
def get_class_in_module(class_name, module_path):
    """Import a class from a module stored in the dynamic-modules cache."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """Retrieve the single pipeline class in `loaded_module` that inherits from `DiffusionPipeline`."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """Download `module_file` from a repo (or grab it locally) and cache it as a dynamic module."""
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Extract a class from a module file, present in a local folder or in a model repository."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
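# Minimal usage sketch (the repo and file names here are hypothetical):
#
#   pipeline_class = get_class_from_dynamic_module(
#       "some-user/some-community-pipeline", "pipeline.py"
#   )
#
# With class_name=None, `find_pipeline_class` picks out the single
# DiffusionPipeline subclass defined in the downloaded module.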
| 132
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/bart-base': 1024,
    'facebook/bart-large': 1024,
    'facebook/bart-large-mnli': 1024,
    'facebook/bart-large-cnn': 1024,
    'facebook/bart-large-xsum': 1024,
    'yjernite/bart_eli5': 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
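# Usage sketch (checkpoint name taken from the map above):
#
#   tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
#   ids = tok("Hello world")["input_ids"]
#
# `build_inputs_with_special_tokens` wraps the sequence as <s> ... </s>, and
# add_prefix_space=True is required before feeding pretokenized inputs.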
| 65
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve defined by a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
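# The basis functions above are the Bernstein polynomials
#   b_{i,n}(t) = C(n, i) * t**i * (1 - t)**(n - i),
# which are non-negative on [0, 1] and sum to 1, so each curve point is a convex
# combination of the control points.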
| 65
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class UpperCAmelCase ( unittest.TestCase ):
def lowercase__ ( self : int ) -> Tuple:
_lowerCAmelCase = tempfile.mkdtemp()
_lowerCAmelCase = SamImageProcessor()
_lowerCAmelCase = SamProcessor(UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Dict , **__snake_case : Any ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ).image_processor
def lowercase__ ( self : Tuple ) -> int:
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : Tuple ) -> str:
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
_lowerCAmelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
_lowerCAmelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : Dict ) -> Any:
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = SamProcessor(image_processor=UpperCAmelCase_ )
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = image_processor(UpperCAmelCase_ , return_tensors="""np""" )
_lowerCAmelCase = processor(images=UpperCAmelCase_ , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def lowercase__ ( self : Optional[int] ) -> int:
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = SamProcessor(image_processor=UpperCAmelCase_ )
_lowerCAmelCase = [torch.ones((1, 3, 5, 5) )]
_lowerCAmelCase = [[17_64, 26_46]]
_lowerCAmelCase = [[6_83, 10_24]]
_lowerCAmelCase = processor.post_process_masks(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
_lowerCAmelCase = processor.post_process_masks(
UpperCAmelCase_ , torch.tensor(UpperCAmelCase_ ) , torch.tensor(UpperCAmelCase_ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
_lowerCAmelCase = [np.ones((1, 3, 5, 5) )]
_lowerCAmelCase = processor.post_process_masks(UpperCAmelCase_ , np.array(UpperCAmelCase_ ) , np.array(UpperCAmelCase_ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
_lowerCAmelCase = [[1, 0], [0, 1]]
with self.assertRaises(UpperCAmelCase_ ):
_lowerCAmelCase = processor.post_process_masks(UpperCAmelCase_ , np.array(UpperCAmelCase_ ) , np.array(UpperCAmelCase_ ) )
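# Note: in these tests 1764 x 2646 are the original image sizes the masks are
# resized back to, while 683 x 1024 are the reshaped sizes the model actually saw.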
@require_vision
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
def lowercase__ ( self : Any ) -> Optional[int]:
_lowerCAmelCase = tempfile.mkdtemp()
_lowerCAmelCase = SamImageProcessor()
_lowerCAmelCase = SamProcessor(UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Tuple , **__snake_case : Optional[int] ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ).image_processor
def lowercase__ ( self : Dict ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[str] ) -> Tuple:
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def lowercase__ ( self : Dict ) -> Union[str, Any]:
_lowerCAmelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
_lowerCAmelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : Any ) -> Optional[Any]:
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = SamProcessor(image_processor=UpperCAmelCase_ )
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = image_processor(UpperCAmelCase_ , return_tensors="""np""" )
_lowerCAmelCase = processor(images=UpperCAmelCase_ , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def lowercase__ ( self : Tuple ) -> Optional[int]:
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = SamProcessor(image_processor=UpperCAmelCase_ )
_lowerCAmelCase = [tf.ones((1, 3, 5, 5) )]
_lowerCAmelCase = [[17_64, 26_46]]
_lowerCAmelCase = [[6_83, 10_24]]
_lowerCAmelCase = processor.post_process_masks(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
_lowerCAmelCase = processor.post_process_masks(
UpperCAmelCase_ , tf.convert_to_tensor(UpperCAmelCase_ ) , tf.convert_to_tensor(UpperCAmelCase_ ) , return_tensors="""tf""" , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
_lowerCAmelCase = [np.ones((1, 3, 5, 5) )]
_lowerCAmelCase = processor.post_process_masks(
UpperCAmelCase_ , np.array(UpperCAmelCase_ ) , np.array(UpperCAmelCase_ ) , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
_lowerCAmelCase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
_lowerCAmelCase = processor.post_process_masks(
UpperCAmelCase_ , np.array(UpperCAmelCase_ ) , np.array(UpperCAmelCase_ ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class UpperCAmelCase ( unittest.TestCase ):
def lowercase__ ( self : str ) -> List[str]:
_lowerCAmelCase = tempfile.mkdtemp()
_lowerCAmelCase = SamImageProcessor()
_lowerCAmelCase = SamProcessor(UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
def lowercase__ ( self : List[Any] , **__snake_case : Union[str, Any] ) -> List[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ).image_processor
def lowercase__ ( self : Dict ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : str ) -> Optional[Any]:
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
@is_pt_tf_cross_test
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = SamProcessor(image_processor=UpperCAmelCase_ )
_lowerCAmelCase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
_lowerCAmelCase = [tf.convert_to_tensor(UpperCAmelCase_ )]
_lowerCAmelCase = [torch.tensor(UpperCAmelCase_ )]
_lowerCAmelCase = [[17_64, 26_46]]
_lowerCAmelCase = [[6_83, 10_24]]
_lowerCAmelCase = processor.post_process_masks(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , return_tensors="""tf""" )
_lowerCAmelCase = processor.post_process_masks(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , return_tensors="""pt""" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def lowercase__ ( self : Tuple ) -> List[Any]:
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = SamProcessor(image_processor=UpperCAmelCase_ )
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = image_processor(UpperCAmelCase_ , return_tensors="""pt""" )["pixel_values"].numpy()
_lowerCAmelCase = processor(images=UpperCAmelCase_ , return_tensors="""pt""" )["pixel_values"].numpy()
_lowerCAmelCase = image_processor(UpperCAmelCase_ , return_tensors="""tf""" )["pixel_values"].numpy()
_lowerCAmelCase = processor(images=UpperCAmelCase_ , return_tensors="""tf""" )["pixel_values"].numpy()
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ ) )
| 365
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 220
| 0
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 76
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 23
| 0
|
"""simple docstring"""
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph with the given number of vertices and edge probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete (fully connected) undirected graph."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
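    # Deterministic example: any probability >= 1 yields a complete graph, e.g.
    # random_graph(3, 1) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}.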
| 357
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
| 0
|
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Compute the Manhattan (taxicab) distance between two n-dimensional points."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('Both points must be in the same n-dimensional space')

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    """Raise if `point` is not a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        'Expected a list of numbers as input, found '
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError('Missing an input')


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """One-liner variant of `manhattan_distance`."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('Both points must be in the same n-dimensional space')

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
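    # Worked example: the distance between [1, 1] and [3, 4] is
    # |1 - 3| + |1 - 4| = 5.0.
    print(manhattan_distance([1, 1], [3, 4]))  # 5.0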
| 43
|
"""simple docstring"""
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image by a given level."""

    def brightness(c: int) -> float:
        # Point operation applied to every pixel value; equivalent to c + level.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""")

    return img.point(brightness)
if __name__ == "__main__":
# Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save('image_data/lena_brightness.png', format='png')
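# Note: the transfer function reduces to c + level, i.e. a constant shift applied
# uniformly to every pixel value.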
| 84
| 0
|
def kinetic_energy(mass: float, velocity: float) -> float:
    """Kinetic energy KE = 1/2 * m * v^2 (the sign of the velocity is ignored)."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
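    # Example: a 10 kg body moving at 10 m/s carries 0.5 * 10 * 10**2 = 500 J.
    print(kinetic_energy(10, 10))  # 500.0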
| 356
|
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n        >>> import torch\n\n        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n        >>> pipe_prior.to(\"cuda\")\n\n        >>> prompt = \"red cat, 4k photo\"\n        >>> out = pipe_prior(prompt)\n        >>> image_emb = out.image_embeds\n        >>> negative_image_emb = out.negative_image_embeds\n\n        >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n        >>> pipe.to(\"cuda\")\n\n        >>> image = pipe(\n        ...     prompt,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=negative_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ... ).images\n\n        >>> image[0].save(\"cat.png\")\n        ```\n"
def get_new_h_w(h, w, scale_factor=8):
    # Round each dimension up to the next multiple of scale_factor**2,
    # then divide by scale_factor to get the latent grid size.
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
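# Example: with the default scale_factor of 8, a requested 700 x 768 resolution is
# rounded up to the next multiple of 64 per side, giving (704, 768).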
class KandinskyPipeline(DiffusionPipeline):
    """Text-to-image generation pipeline for Kandinsky 2.1."""

    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
def __snake_case ( self : Optional[int] , lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str=None , ) -> List[str]:
__snake_case : Tuple = len(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else 1
# get prompt text embeddings
__snake_case : Optional[int] = self.tokenizer(
lowerCamelCase , padding="max_length" , truncation=lowerCamelCase , max_length=77 , return_attention_mask=lowerCamelCase , add_special_tokens=lowerCamelCase , return_tensors="pt" , )
__snake_case : List[str] = text_inputs.input_ids
__snake_case : List[Any] = self.tokenizer(lowerCamelCase , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(lowerCamelCase , lowerCamelCase ):
__snake_case : Optional[Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__snake_case : Any = text_input_ids.to(lowerCamelCase )
__snake_case : List[str] = text_inputs.attention_mask.to(lowerCamelCase )
__snake_case , __snake_case : List[str] = self.text_encoder(
input_ids=lowerCamelCase , attention_mask=lowerCamelCase )
__snake_case : List[Any] = prompt_embeds.repeat_interleave(lowerCamelCase , dim=0 )
__snake_case : List[str] = text_encoder_hidden_states.repeat_interleave(lowerCamelCase , dim=0 )
__snake_case : Optional[int] = text_mask.repeat_interleave(lowerCamelCase , dim=0 )
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : Any = [""] * batch_size
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
F'`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !='
F' {type(lowerCamelCase )}.' )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : List[Any] = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
F'`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:'
F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
" the batch size of `prompt`." )
else:
__snake_case : int = negative_prompt
__snake_case : Dict = self.tokenizer(
lowerCamelCase , padding="max_length" , max_length=77 , truncation=lowerCamelCase , return_attention_mask=lowerCamelCase , add_special_tokens=lowerCamelCase , return_tensors="pt" , )
__snake_case : Dict = uncond_input.input_ids.to(lowerCamelCase )
__snake_case : List[Any] = uncond_input.attention_mask.to(lowerCamelCase )
__snake_case , __snake_case : Tuple = self.text_encoder(
input_ids=lowerCamelCase , attention_mask=lowerCamelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : Dict = negative_prompt_embeds.shape[1]
__snake_case : int = negative_prompt_embeds.repeat(1 , lowerCamelCase )
__snake_case : List[str] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowerCamelCase )
__snake_case : Union[str, Any] = uncond_text_encoder_hidden_states.shape[1]
__snake_case : Tuple = uncond_text_encoder_hidden_states.repeat(1 , lowerCamelCase , 1 )
__snake_case : str = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , lowerCamelCase , -1 )
__snake_case : Optional[int] = uncond_text_mask.repeat_interleave(lowerCamelCase , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
__snake_case : List[Any] = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__snake_case : Any = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def __snake_case ( self : List[str] , lowerCamelCase : Dict=0 ) -> Tuple:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
__snake_case : Optional[int] = torch.device(F'cuda:{gpu_id}' )
__snake_case : Optional[Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase , lowerCamelCase )
def __snake_case ( self : List[Any] , lowerCamelCase : int=0 ) -> Optional[int]:
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
__snake_case : Optional[Any] = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=lowerCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__snake_case : List[str] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
__snake_case , __snake_case : List[Any] = cpu_offload_with_hook(lowerCamelCase , lowerCamelCase , prev_module_hook=lowerCamelCase )
if self.safety_checker is not None:
__snake_case , __snake_case : Optional[int] = cpu_offload_with_hook(self.safety_checker , lowerCamelCase , prev_module_hook=lowerCamelCase )
# We'll offload the last model manually.
__snake_case : str = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __snake_case ( self : List[Any] ) -> Optional[int]:
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase )
def __call__( self : Dict , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : int = 512 , lowerCamelCase : int = 512 , lowerCamelCase : int = 100 , lowerCamelCase : float = 4.0 , lowerCamelCase : int = 1 , lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , ) -> List[Any]:
if isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : Optional[int] = 1
elif isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : List[Any] = len(lowerCamelCase )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}' )
__snake_case : Any = self._execution_device
__snake_case : Any = batch_size * num_images_per_prompt
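# here guidance_scale is defined analog to the guidance weight `w` of eq. (2)
# of the Imagen paper; guidance_scale = 1 corresponds to doing no
# classifier-free guidance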
__snake_case : Any = guidance_scale > 1.0
__snake_case , __snake_case , __snake_case : Optional[Any] = self._encode_prompt(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : List[Any] = torch.cat(lowerCamelCase , dim=0 )
if isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : str = torch.cat(lowerCamelCase , dim=0 )
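# (negative) image embeddings may arrive as per-prompt lists, so they are
# concatenated into single batch tensors above before being repeated per image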
if do_classifier_free_guidance:
__snake_case : Dict = image_embeds.repeat_interleave(lowerCamelCase , dim=0 )
__snake_case : Optional[Any] = negative_image_embeds.repeat_interleave(lowerCamelCase , dim=0 )
__snake_case : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=lowerCamelCase )
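# set up the denoising schedule on the execution device before sampling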
self.scheduler.set_timesteps(lowerCamelCase , device=lowerCamelCase )
__snake_case : Tuple = self.scheduler.timesteps
__snake_case : Union[str, Any] = self.unet.config.in_channels
__snake_case , __snake_case : Tuple = get_new_h_w(lowerCamelCase , lowerCamelCase , self.movq_scale_factor )
# create initial latent
__snake_case : Any = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , lowerCamelCase , lowerCamelCase , lowerCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : int = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
__snake_case : Optional[Any] = self.unet(
sample=lowerCamelCase , timestep=lowerCamelCase , encoder_hidden_states=lowerCamelCase , added_cond_kwargs=lowerCamelCase , return_dict=lowerCamelCase , )[0]
if do_classifier_free_guidance:
__snake_case , __snake_case : Any = noise_pred.split(latents.shape[1] , dim=1 )
__snake_case , __snake_case : Union[str, Any] = noise_pred.chunk(2 )
__snake_case , __snake_case : str = variance_pred.chunk(2 )
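# classifier-free guidance: push the conditional prediction away from the
# unconditional one by the guidance scale, keeping the text branch's variance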
__snake_case : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__snake_case : Union[str, Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__snake_case , __snake_case : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__snake_case : str = self.scheduler.step(
lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase , ).prev_sample
# post-processing
__snake_case : str = self.movq.decode(lowerCamelCase , force_not_quantize=lowerCamelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
__snake_case : Union[str, Any] = image * 0.5 + 0.5
__snake_case : Union[str, Any] = image.clamp(0 , 1 )
__snake_case : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__snake_case : str = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase )
| 134
| 0
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput ):
    sample: torch.FloatTensor
class snake_case_ (ModelMixin , ConfigMixin ):
@register_to_config
    def __init__( self , num_attention_heads : int = 16 , attention_head_dim : int = 88 , in_channels : Optional[int] = None , out_channels : Optional[int] = None , num_layers : int = 1 , dropout : float = 0.0 , norm_num_groups : int = 32 , cross_attention_dim : Optional[int] = None , attention_bias : bool = False , sample_size : Optional[int] = None , activation_fn : str = "geglu" , norm_elementwise_affine : bool = True , double_self_attention : bool = True , ) -> None:
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        # 2. Define input layers (group norm plus projection into the attention width)
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups , num_channels=in_channels , eps=1E-6 , affine=True )
        self.proj_in = nn.Linear(in_channels , inner_dim )
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , cross_attention_dim=cross_attention_dim , activation_fn=activation_fn , attention_bias=attention_bias , double_self_attention=double_self_attention , norm_elementwise_affine=norm_elementwise_affine , )
                for d in range(num_layers )
            ] )
        # 4. Define output layers (project back to the input channel count)
        self.proj_out = nn.Linear(inner_dim , in_channels )
    def forward( self , hidden_states , encoder_hidden_states=None , timestep=None , class_labels=None , num_frames=1 , cross_attention_kwargs=None , return_dict : bool = True , ):
        # 1. Input: fold the frame dimension out of the batch so attention runs over time
        batch_frames , channel , height , width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size , num_frames , channel , height , width )
        hidden_states = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        hidden_states = self.norm(hidden_states )
        hidden_states = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , num_frames , channel )
        hidden_states = self.proj_in(hidden_states )
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states , encoder_hidden_states=encoder_hidden_states , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , class_labels=class_labels , )
        # 3. Output
        hidden_states = self.proj_out(hidden_states )
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size , height , width , channel , num_frames )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames , channel , height , width )
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output )
| 87
|
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name ):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset" ), "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    return config
def create_rename_keys(config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v(state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
            in_proj_bias = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[: dim]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v(state_dict , config ):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"""] = in_proj_weight[: hidden_size, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"""] = in_proj_weight[-hidden_size :, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"""] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"""] = in_proj_weight[: hidden_size, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"""] = in_proj_weight[-hidden_size :, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"""] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path, "rb" ) as f:
        data = pickle.load(f )
    state_dict = data["model"]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_swin_q_k_v(state_dict, config.backbone_config )
    read_in_decoder_q_k_v(state_dict, config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict, strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, f"""Unexpected keys: {unexpected_keys}"""
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels )
    inputs = image_processor(image, return_tensors="pt" )
    outputs = model(**inputs )
    print("Logits:", outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1E-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing model and image processor to the hub..." )
        model.push_to_hub(f"""nielsr/{model_name}""" )
        image_processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 61
| 0
|
'''simple docstring'''
from __future__ import annotations
def snake_case_ ( voltage , current , resistance )-> dict[str, float]:
    '''
    Apply Ohm's law: exactly one of voltage, current and resistance must be
    passed as 0, and the missing quantity is computed from the other two.
    '''
    if (voltage, current, resistance).count(0 ) != 1:
        raise ValueError("""One and only one argument must be 0""" )
    if resistance < 0:
        raise ValueError("""Resistance cannot be negative""" )
    if voltage == 0:
        return {"voltage": float(current * resistance )}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
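# a minimal usage sketch (hypothetical values): solving for the current
# through a 5 ohm resistor at 10 V gives I = V / R = 2.0 A
print(snake_case_(voltage=10 , current=0 , resistance=5 ) )  # {'current': 2.0}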
| 370
|
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 637_8137.0
AXIS_B = 635_6752.31_4245
EQUATORIAL_RADIUS = 6_3_7_8_1_3_7
def snake_case_ ( lat1 , lon1 , lat2 , lon2 )-> float:
    '''
    Calculate the geodesic distance in metres between two points on Earth
    using Lambert's formula for long lines on the WGS84 ellipsoid.
    '''
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
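# rough sanity check (hypothetical coordinates, San Francisco -> New York):
# snake_case_(37.774856, -122.424227, 40.713019, -74.012647) should be on the
# order of 4.14e6 metres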
| 349
| 0
|
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset , expected_features ):
    """simple docstring"""
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
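# the reader tests below exercise SqlDatasetReader both with and without an
# in-memory cache, and with explicit feature schemas overriding inferred ones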
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory , sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset" , "sqlite:///" + sqlite_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_sql_dataset(dataset , expected_features )
@require_sqlalchemy
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_dataset_from_sql_features(features , sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , features=features , cache_dir=cache_dir ).read()
    _check_sql_dataset(dataset , expected_features )
def iter_sql_file(sqlite_path ):
    """simple docstring"""
    with contextlib.closing(sqlite3.connect(sqlite_path ) ) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset" )
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir , "tmp.sql" )
    dataset = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=cache_dir ).read()
    SqlDatasetWriter(dataset , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=1 ).write()
    original_sql = iter_sql_file(sqlite_path )
    expected_sql = iter_sql_file(output_sqlite_path )
    for row1, row2 in zip(original_sql , expected_sql ):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir , "tmp.sql" )
    dataset = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=cache_dir ).read()
    SqlDatasetWriter(dataset , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=2 ).write()
    original_sql = iter_sql_file(sqlite_path )
    expected_sql = iter_sql_file(output_sqlite_path )
    for row1, row2 in zip(original_sql , expected_sql ):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir , "tmp.sql" )
    dataset = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=cache_dir ).read()
    with pytest.raises(ValueError ):
        SqlDatasetWriter(dataset , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=0 ).write()
| 340
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class lowercase__ (PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , )-> None:
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            # keep the backing normalizer consistent with the requested options
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None )-> List[int]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
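    # token type ids mark segment membership: 0 for the first sequence (including
    # [CLS] and its [SEP]), 1 for the optional second sequence and its [SEP]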
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None )-> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None )-> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 340
| 1
|
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
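# Project Euler 174: a square lamina is a square outline with a centred square
# hole; using at most t_limit tiles, count the tile totals t that can form
# between 1 and n_limit distinct laminae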
def lowercase__ ( t_limit = 1000000 , n_limit = 10 )-> int:
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(f'{solution() = }')
| 183
|
'''simple docstring'''
def lowercase__ ( word , max_width )-> list:
    words = word.split()
    def justify(line , width , max_width ) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line )
        if len(line ) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations ):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words ):
                # add the word
                aligned_words_list.append(line[i] )
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list )
    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word ) + len(line ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word )
            width += len(word )
        else:
            # justify the line and add it to result
            answer.append(justify(line , width , max_width ) )
            # reset new line and new width
            line , width = [word], len(word )
    remaining_spaces = max_width - width - len(line )
    answer.append(""" """.join(line ) + (remaining_spaces + 1) * """ """ )
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 183
| 1
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parrameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths , annos = get_dataset(LABEL_DIR , IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(annos ) ) , 4 )
        new_image , new_annos , path = update_image_and_anno(
            img_paths , annos , idxs , OUTPUT_SIZE , SCALE_RANGE , filter_scale=FILTER_TINY_SCALE , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = path.split(os.sep )[-1].rsplit('.' , 1 )[0]
        file_root = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
        cv2.imwrite(f'{file_root}.jpg' , new_image , [cv2.IMWRITE_JPEG_QUALITY, 85] )
        print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' )
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f'{anno[0]} {x_center} {y_center} {width} {height}'
            annos_list.append(obj )
        with open(f'{file_root}.txt' , 'w' ) as outfile:
            outfile.write('\n'.join(line for line in annos_list ) )
def get_dataset(label_dir , img_dir ) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '*.txt' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , f'{label_name}.jpg' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n' ).split(' ' )
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno(all_img_list , all_annos , idxs , output_size , scale_range , filter_scale = 0.0 , ) -> tuple[list, list, str]:
    # Build a 2x2 mosaic: one source image per quadrant, with annotations
    # rescaled into mosaic coordinates around a random split point.
    output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cv2.imread(path )
        if i == 0:  # top-left
            img = cv2.resize(img , (divid_point_x, divid_point_y) )
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            img = cv2.resize(img , (output_size[1] - divid_point_x, divid_point_y) )
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            img = cv2.resize(img , (divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            img = cv2.resize(
                img , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char ) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 69
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    """b0""": efficientnet.EfficientNetB0,
    """b1""": efficientnet.EfficientNetB1,
    """b2""": efficientnet.EfficientNetB2,
    """b3""": efficientnet.EfficientNetB3,
    """b4""": efficientnet.EfficientNetB4,
    """b5""": efficientnet.EfficientNetB5,
    """b6""": efficientnet.EfficientNetB6,
    """b7""": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 2_2_4,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 2_4_0,
"""dropout_rate""": 0.2,
"""dw_padding""": [1_6],
},
"""b2""": {
"""hidden_dim""": 1_4_0_8,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 2_6_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 1_6],
},
"""b3""": {
"""hidden_dim""": 1_5_3_6,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 3_0_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 1_8],
},
"""b4""": {
"""hidden_dim""": 1_7_9_2,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 3_8_0,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_0_4_8,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 4_5_6,
"""dropout_rate""": 0.4,
"""dw_padding""": [1_3, 2_7],
},
"""b6""": {
"""hidden_dim""": 2_3_0_4,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 5_2_8,
"""dropout_rate""": 0.5,
"""dw_padding""": [3_1],
},
"""b7""": {
"""hidden_dim""": 2_5_6_0,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 6_0_0,
"""dropout_rate""": 0.5,
"""dw_padding""": [1_8],
},
}
def get_efficientnet_config(model_name ):
    '''simple docstring'''
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["""hidden_dim"""]
    config.width_coefficient = CONFIG_MAP[model_name]["""width_coef"""]
    config.depth_coefficient = CONFIG_MAP[model_name]["""depth_coef"""]
    config.image_size = CONFIG_MAP[model_name]["""image_size"""]
    config.dropout_rate = CONFIG_MAP[model_name]["""dropout_rate"""]
    config.depthwise_padding = CONFIG_MAP[model_name]["""dw_padding"""]
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor(model_name ):
    '''simple docstring'''
    size = CONFIG_MAP[model_name]["""image_size"""]
    preprocessor = EfficientNetImageProcessor(
        size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=False , )
    return preprocessor
def rename_keys(original_param_names ):
'''simple docstring'''
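# e.g. "block1a_expand_conv/..." -> block id "1a"; distinct ids are sorted and
# mapped to sequential indices for the HF encoder blocks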
block_names = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
block_names = sorted(set(block_names ) )
num_blocks = len(block_names )
block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
_UpperCAmelCase : Any = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
key_mapping = {}
for item in rename_keys:
    if item[0] in original_param_names:
        key_mapping[item[0]] = """efficientnet.""" + item[1]
key_mapping["""predictions/kernel:0"""] = """classifier.weight"""
key_mapping["""predictions/bias:0"""] = """classifier.bias"""
return key_mapping
def replace_params(hf_params , tf_params , key_mapping ):
    '''simple docstring'''
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = model_classes[model_name](
include_top=lowerCAmelCase_ , weights="""imagenet""" , input_tensor=lowerCAmelCase_ , input_shape=lowerCAmelCase_ , pooling=lowerCAmelCase_ , classes=1000 , classifier_activation="""softmax""" , )
_UpperCAmelCase : List[str] = original_model.trainable_variables
_UpperCAmelCase : Any = original_model.non_trainable_variables
_UpperCAmelCase : Optional[int] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
_UpperCAmelCase : Dict = param.numpy()
_UpperCAmelCase : Optional[Any] = list(tf_params.keys() )
# Load HuggingFace model
_UpperCAmelCase : List[Any] = get_efficientnet_config(lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(lowerCAmelCase_ ).eval()
_UpperCAmelCase : int = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
_UpperCAmelCase : Optional[int] = rename_keys(lowerCAmelCase_ )
replace_params(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Initialize preprocessor and preprocess input image
_UpperCAmelCase : str = convert_image_processor(lowerCAmelCase_ )
_UpperCAmelCase : List[str] = preprocessor(images=prepare_img() , return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
_UpperCAmelCase : List[str] = hf_model(**lowerCAmelCase_ )
_UpperCAmelCase : Any = outputs.logits.detach().numpy()
# Original model inference
_UpperCAmelCase : int = False
_UpperCAmelCase : Optional[int] = CONFIG_MAP[model_name]["""image_size"""]
_UpperCAmelCase : Dict = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
_UpperCAmelCase : Optional[Any] = image.img_to_array(lowerCAmelCase_ )
_UpperCAmelCase : str = np.expand_dims(lowerCAmelCase_ , axis=0 )
_UpperCAmelCase : str = original_model.predict(lowerCAmelCase_ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowerCAmelCase_ ):
os.mkdir(lowerCAmelCase_ )
# Save converted model and image processor
hf_model.save_pretrained(lowerCAmelCase_ )
preprocessor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
# Push model and image processor to hub
print(F'''Pushing converted {model_name} to the hub...''' )
_UpperCAmelCase : List[Any] = F'''efficientnet-{model_name}'''
preprocessor.push_to_hub(lowerCAmelCase_ )
hf_model.push_to_hub(lowerCAmelCase_ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
A_ : Optional[Any] = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
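# A minimal sketch of loading the converted checkpoint back for inference
# (assumes the script was run with --save_model and the default "hf_model"
# dump folder; the image URL is only illustrative):
#     from PIL import Image
#     import requests
#     from transformers import AutoImageProcessor, EfficientNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("hf_model")
#     model = EfficientNetForImageClassification.from_pretrained("hf_model").eval()
#     image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
#     with torch.no_grad():
#         logits = model(**processor(images=image, return_tensors="pt")).logits
#     print(model.config.id2label[int(logits.argmax(-1))])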
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    """Recursively print the structure of a (possibly nested) state dict."""
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
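# Illustrative usage (not executed by the script): printing a toy nested state
# dict shows keys indented by depth and tensors with their sizes, e.g.
#     recursive_print(None, {"transformer": {"wte.weight": torch.zeros(4, 2)}})
# prints "# transformer" followed by "..# wte.weight : torch.Size([4, 2])".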
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Permute the layout of a fused QKV tensor to [num_splits * num_heads * hidden_size, :],
    the layout expected by later versions of NVIDIA Megatron-LM."""
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
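# A quick shape check (illustrative values): for a checkpoint_version >= 2.0
# tensor stored as [num_heads * num_splits * hidden_size, :], the fix reorders
# rows to [num_splits * num_heads * hidden_size, :] with the shape unchanged:
#     num_heads, hidden_size, num_splits, n_embd = 2, 4, 3, 8
#     qkv = torch.randn(num_heads * num_splits * hidden_size, n_embd)
#     fixed = fix_query_key_value_ordering(qkv, 2.0, num_splits, num_heads, hidden_size)
#     assert fixed.shape == qkv.shape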
def convert_megatron_checkpoint(args, input_state_dict, config):
    """Convert a Megatron-LM GPT-2 checkpoint into a transformers state dict."""
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For the LM head, transformers wants the matrix tied to the word embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
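# A minimal sketch of using the converted checkpoint (paths illustrative): the
# script writes config.json, tokenizer files and pytorch_model.bin next to the
# input checkpoint, so that directory loads directly with transformers:
#     from transformers import GPT2LMHeadModel, GPT2Tokenizer
#
#     tokenizer = GPT2Tokenizer.from_pretrained("path/to/checkpoint_dir")
#     model = GPT2LMHeadModel.from_pretrained("path/to/checkpoint_dir").eval()
#     inputs = tokenizer("Megatron checkpoints converted to", return_tensors="pt")
#     print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0]))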
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load the slow tokenizer; the fast class converts it on the fly
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            # Keep only the tokenizer.json (fast tokenizer) files
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
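# Example invocation (script and output file names are illustrative): convert a
# single canonical BERT checkpoint and keep only the generated tokenizer.json:
#     python convert_slow_tokenizers_checkpoints_to_fast.py \
#         --tokenizer_name BertTokenizer \
#         --checkpoint_name bert-base-uncased \
#         --dump_path ./fast_tokenizers
# The resulting file can then be loaded directly:
#     from transformers import PreTrainedTokenizerFast
#     fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file="./fast_tokenizers/bert-base-uncased-tokenizer.json")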