| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 87–55.2k | int64 0–349 | stringlengths 135–49.1k | int64 0–349 | int64 0–1 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the word's letters sorted, as an anagram signature."""
    return "".join(sorted(word))
def anagram(my_word: str) -> list[str]:
    """Return every word sharing the given word's signature."""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
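# Minimal usage sketch (not part of the original script; uses a small inline
# word list instead of the words.txt fixture):
#
#     words = ["dog", "god", "cat", "act", "bird"]
#     by_signature = collections.defaultdict(list)
#     for w in words:
#         by_signature[signature(w)].append(w)
#     assert by_signature[signature("god")] == ["dog", "god"]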
| 21
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return set(pairs)
class PhobertTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for PhoBERT, following the fairseq vocabulary format."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        # Special tokens occupy the first four ids, as in fairseq.
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        """Load a fairseq-style dictionary from `f` and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
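# Hedged usage sketch (not part of this module; assumes the public
# "vinai/phobert-base" checkpoint is reachable):
#
#     tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
#     tokens = tokenizer.tokenize("Xin chào")   # BPE pieces, continuation marked with "@@ "
#     ids = tokenizer.convert_tokens_to_ids(tokens)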
| 314
| 0
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    """Factory wired into argparse via `set_defaults(func=...)`."""
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help=(
                "Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only"
                " if you've reviewed the code, as it will execute on your local machine"
            ),
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer
        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
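# Hedged usage sketch: this subcommand is reached through the transformers CLI
# entry point; model name is illustrative:
#
#     transformers-cli download bert-base-uncased --cache-dir ./models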
| 22
|
from functools import reduce
N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in `n`."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
    print(f"{solution() = }")
| 314
| 0
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)
if is_torch_available():
    import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))
    def test_error_save_pretrained(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            # Saving in the BetterTransformer format is not supported, so this must raise.
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
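# Hedged sketch of the API exercised above (assumes torch and optimum are
# installed; the checkpoint name is illustrative):
#
#     model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
#     model = model.to_bettertransformer()        # swap in fused attention kernels
#     model = model.reverse_bettertransformer()   # required before save_pretrained()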
| 23
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
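# Hedged usage sketch (not part of this module; assumes the public
# "google/ddpm-cifar10-32" checkpoint):
#
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(num_inference_steps=50, eta=0.0).images[0]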
| 314
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        return self.d_model
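# Hedged usage sketch: `attribute_map` above lets the generic config names
# resolve to the PEGASUS-specific ones (not part of the original module):
#
#     config = PegasusConfig()
#     assert config.hidden_size == config.d_model == 1024
#     assert config.num_attention_heads == config.encoder_attention_heads == 16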
| 24
|
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
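# Hedged usage sketch (values are illustrative, mirroring a weakly supervised
# fine-tuning setup; not part of the original module):
#
#     config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
#     assert config.max_num_rows == 64 and config.max_num_columns == 32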
| 314
| 0
|
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : Dict = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
UpperCAmelCase__ : Any = 5_0_0_0_3
UpperCAmelCase__ : Tuple = 5_0_0_0_2
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ (a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = PLBartTokenizer
__UpperCamelCase : Optional[Any] = None
__UpperCamelCase : Dict = False
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : List[Any] = PLBartTokenizer(SCREAMING_SNAKE_CASE__ , language_codes="""base""" , keep_accents=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = PLBartTokenizer(SCREAMING_SNAKE_CASE__ , language_codes="""base""" , keep_accents=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
SCREAMING_SNAKE_CASE__ : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
SCREAMING_SNAKE_CASE__ : int = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) for x in range(end - 4 , SCREAMING_SNAKE_CASE__ )]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
SCREAMING_SNAKE_CASE__ : str = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids
self.assertEqual(
tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , )
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = PLBartTokenizer(SCREAMING_SNAKE_CASE__ , language_codes="""multi""" , keep_accents=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Dict = [tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) for x in range(end - 7 , SCREAMING_SNAKE_CASE__ )]
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
SCREAMING_SNAKE_CASE__ : str = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
SCREAMING_SNAKE_CASE__ : str = tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids
self.assertEqual(
tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = '''uclanlp/plbart-python-en_XX'''
__UpperCamelCase : Tuple = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
__UpperCamelCase : Optional[int] = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
__UpperCamelCase : Union[str, Any] = [
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def __magic_name__ (cls ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
SCREAMING_SNAKE_CASE__ : Dict = 1
return cls
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self.assertIn(SCREAMING_SNAKE_CASE__ , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE__ : int = [EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = 10
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Any = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = PLBartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE__ )
@require_torch
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=3 , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=10 , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = targets["""input_ids"""]
SCREAMING_SNAKE_CASE__ : Dict = shift_tokens_right(SCREAMING_SNAKE_CASE__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , {
# A, test, EOS, en_XX
"""input_ids""": [[1_50, 2_42, 2, 5_00_03]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_00_01,
} , )
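# Hedged sketch of the translation workflow the integration tests above cover
# (assumes the public "uclanlp/plbart-python-en_XX" checkpoint):
#
#     tokenizer = PLBartTokenizer.from_pretrained(
#         "uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX"
#     )
#     batch = tokenizer(["def add(a,b):NEW_LINE_INDENTreturn a+b"], return_tensors="pt")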
| 25
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AutoTokenizer,
    BertConfig,
    BertTokenizer,
    BertTokenizerFast,
    CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
    PreTrainedTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
    TOKENIZER_MAPPING,
    get_tokenizer_config,
    tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
    DUMMY_DIFF_TOKENIZER_IDENTIFIER,
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tokenizers,
    slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402
if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)
    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)
    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)
    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)
    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")
    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)
            self.assertEqual(tokenizer.model_max_length, 512)
    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")
    def test_model_name_edge_cases_in_mappings(self):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []
        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)
            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)
        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)
    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)
    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])
        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])
    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")
    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)
        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)
    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)
    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})
        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})
        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)
        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")
    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)
            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))
            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)
            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new tokenizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)
                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")
            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False
        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)
            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)
            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
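# Hedged sketch of the registration pattern exercised above (CustomConfig and
# the tokenizers come from the test_module fixtures imported at the top; the
# checkpoint path is hypothetical):
#
#     AutoConfig.register("custom", CustomConfig)
#     AutoTokenizer.register(
#         CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
#     )
#     tokenizer = AutoTokenizer.from_pretrained("/path/to/custom/checkpoint")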
| 314
| 0
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a , _a , _a , _a , ) -> List[Any]:
super().__init__()
self.register_modules(
prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , )
def a__ ( self , _a , _a , _a , _a , _a , _a ) -> str:
if latents is None:
_A : str = randn_tensor(_a , generator=_a , device=_a , dtype=_a )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
_A : Union[str, Any] = latents.to(_a )
_A : int = latents * scheduler.init_noise_sigma
return latents
def a__ ( self , _a=0 ) -> Optional[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_A : str = torch.device(F'''cuda:{gpu_id}''' )
_A : Any = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
def a__ ( self ) -> List[Any]:
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_a , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and conditional embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image,
        num_images_per_prompt=1,
        num_inference_steps=25,
        generator=None,
        latents=None,
        guidance_scale=4.0,
        frame_size=64,
        output_type="pil",
        return_dict=True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
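

# --- Hedged illustration (added for clarity; not part of the diffusers source) ---
# The denoising loop above mixes the unconditional and conditional predictions as
# `uncond + guidance_scale * (cond - uncond)`. A tiny self-contained check of that
# arithmetic, defined but not called here:
def _cfg_mix_demo():
    noise_pred_uncond = torch.zeros(1, 4)
    noise_pred_cond = torch.ones(1, 4)
    guidance_scale = 3.0
    guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
    # With scale 3.0 the conditional direction is amplified three-fold.
    assert torch.allclose(guided, 3.0 * torch.ones(1, 4))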
| 26
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images to pass to the image processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
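

# --- Hedged usage sketch (added for clarity; mirrors what the tests above exercise;
# the helper is defined but never executed here) ---
def _example_processor_roundtrip(tmpdirname):
    processor = VisionTextDualEncoderProcessor(
        tokenizer=BertTokenizer.from_pretrained(tmpdirname),
        image_processor=ViTImageProcessor.from_pretrained(tmpdirname),
    )
    image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
    # Returns input_ids/token_type_ids/attention_mask from the tokenizer plus
    # pixel_values from the image processor, bundled in one BatchEncoding.
    return processor(text="lower newer", images=image, return_tensors="np")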
| 314
| 0
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
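

# --- Hedged illustration (added for clarity; not part of the original script) ---
# ConstantLengthDataset packs tokenized examples, separated by the BOS token, into
# fixed-length blocks and drops the ragged tail. The same packing in miniature:
def _packing_demo():
    seq_length = 4
    concat_token_id = 0
    tokenized_inputs = [[1, 2], [3, 4, 5], [6]]
    all_token_ids = []
    for tokenized_input in tokenized_inputs:
        all_token_ids.extend(tokenized_input + [concat_token_id])
    # all_token_ids == [1, 2, 0, 3, 4, 5, 0, 6, 0] -> two full blocks, tail dropped
    blocks = [
        all_token_ids[i : i + seq_length]
        for i in range(0, len(all_token_ids), seq_length)
        if len(all_token_ids[i : i + seq_length]) == seq_length
    ]
    assert blocks == [[1, 2, 0, 3], [4, 5, 0, 6]]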
| 27
|
from ...configuration_utils import PretrainedConfig

NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 314
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" MBART tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
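

# --- Hedged usage sketch (added for clarity; the checkpoint name is taken from the
# map above; defined but not executed since it would download weights) ---
def _example_en_to_ro_batch():
    tokenizer = MBartTokenizerFast.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
    # Per set_src_lang_special_tokens above, input_ids end with </s> followed by
    # the en_XX language-code token.
    return batch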
| 28
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # The custom pre-tokenizer cannot be serialized, so swap in a plain one while saving.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
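

# --- Hedged illustration (added for clarity; defined but not executed since it
# needs a real checkpoint) ---
# The custom Jieba pre-tokenizer is not picklable, so __getstate__ swaps in a plain
# BertPreTokenizer and __setstate__ restores the custom one on load:
def _example_pickle_roundtrip():
    import pickle

    tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    restored = pickle.loads(pickle.dumps(tokenizer))
    return restored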
| 314
| 0
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
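# A triangle (odd cycle) is the smallest graph that cannot be two-colored:
print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False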
| 29
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
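

# --- Hedged usage sketch (added for clarity; not part of the transformers source) ---
# `attribute_map` lets `hidden_size` transparently alias `n_embd` (and likewise for
# the head and layer counts):
def _example_config():
    config = TrajectoryTransformerConfig(n_layer=2, n_head=2)
    assert config.hidden_size == config.n_embd == 128
    assert config.num_hidden_layers == 2
    return config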
| 314
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
    'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_git'] = [
        'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GitForCausalLM',
        'GitModel',
        'GitPreTrainedModel',
        'GitVisionModel',
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30
|
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient over 2..limit via a sieve of primes."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"{solution() = }")
| 314
| 0
|
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)

    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    # Retrieve the pipeline class that inherits from `DiffusionPipeline`.
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    # Download and cache `module_file` from `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
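

# --- Hedged usage sketch (added for clarity; the pipeline name and module file are
# assumed examples from the community folder and are not verified here) ---
# This helper is what powers `custom_pipeline=` in `DiffusionPipeline.from_pretrained`;
# defined but not executed since it would hit the network:
def _example_load_community_pipeline():
    pipeline_class = get_class_from_dynamic_module(
        "lpw_stable_diffusion",  # assumed community pipeline name
        module_file="lpw_stable_diffusion.py",  # assumed module file name
    )
    return pipeline_class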
| 31
|
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
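
    # Worked example (added for clarity): a 4x4 input with size=2, stride=2
    # reduces to a 2x2 output.
    example = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    assert maxpooling(example, size=2, stride=2).tolist() == [[6.0, 8.0], [14.0, 16.0]]
    assert avgpooling(example, size=2, stride=2).tolist() == [[3.0, 5.0], [11.0, 13.0]]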
| 314
| 0
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
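

# --- Hedged illustration (added for clarity; not part of the original test) ---
# The script expects one process per rank so RANK/WORLD_SIZE are set, e.g. an
# assumed launch command:
#   torchrun --nproc_per_node=2 this_script.py --streaming True
# The per-rank size arithmetic used in main(), checked in miniature:
def _expected_sizes_demo():
    world_size = 2
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD  # 12 items in total
    sizes = [full_size // world_size + int(rank < full_size % world_size) for rank in range(world_size)]
    assert sizes == [6, 6]
    return sizes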
| 32
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
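# Worked example (added for clarity): the tabulation returns every decomposition.
assert all_construct("abc", ["a", "b", "c", "ab"]) == [["ab", "c"], ["a", "b", "c"]]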
| 314
| 0
|
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 33
|
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current price for ``symbol`` from the Yahoo Finance quote page."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
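
# Hedged note (not part of the original script): the CSS class above is tied to
# one particular Yahoo Finance page layout, so the scraper is assumed to break
# whenever the markup changes. A defensive call might look like:
#
#     try:
#         print(stock_price("AAPL"))
#     except AttributeError:
#         print("Price element not found - the page layout may have changed.")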
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 314
| 0
|
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Return the histogram of measuring a single qubit on the Aer simulator."""
    backend = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, backend, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
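
# Hedged note (not in the original file): ``qiskit.execute`` and ``qiskit.Aer``
# were removed in Qiskit 1.0. On newer installs the assumed equivalent is:
#
#     from qiskit_aer import AerSimulator
#     backend = AerSimulator()
#     job = backend.run(qiskit.transpile(circuit, backend), shots=1000)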
if __name__ == "__main__":
print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 34
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
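
# Hedged usage sketch (not part of the test file): outside the tests the
# scheduler is driven roughly like this; ``model`` and ``sample`` are
# placeholder names for a denoiser and its noisy input.
#
#     scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
#     scheduler.set_timesteps(25)
#     for t in scheduler.timesteps:
#         noise_pred = model(sample, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample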
| 314
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Double Linked List Node built specifically for LRU Cache."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__( self : int ):
return (
f"Node: key: {self.key}, val: {self.val}, "
f"has next: {bool(self.next )}, has prev: {bool(self.prev )}"
)
class DoubleLinkedList(Generic[T, U]):
    """Double Linked List built specifically for LRU Cache."""

    def __init__(self):
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head
    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Adds the given node at the end of the list (right before the rear sentinel)."""
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        node.next = self.rear
        self.rear.prev = node

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Removes and returns the given node from the list; returns None if it is not linked."""
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    """LRU Cache to store a given capacity of key/value pairs."""

    # class variable that maps decorated functions to their cache instances
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache
    def get(self, key: T) -> U | None:
        """Returns the value for ``key`` and bumps it to most-recently used; None on a miss."""
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Sets ``key`` to ``value``, evicting the least-recently used entry when over capacity."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(
        cls, size: int = 128
    ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of LRU Cache; the decorated function must map T -> U."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
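
    # Hedged usage sketch (not in the original file): memoise a recursive
    # Fibonacci function with the class-level decorator defined above.
    @LRUCache.decorator(100)
    def fib(num: int) -> int:
        if num in (1, 2):
            return 1
        return fib(num - 1) + fib(num - 2)

    print(fib(30))  # 832040; repeated sub-calls are served from the cache
    print(fib.cache_info())  # type: ignore[attr-defined]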
| 35
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_base_case(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_passed_arguments(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
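
# Hedged usage sketch (not part of the test file): in real training code the
# decorator wraps the inner loop so OOM failures halve the batch size and
# retry; ``make_dataloader`` and ``training_step`` are hypothetical helpers.
#
#     @find_executable_batch_size(starting_batch_size=256)
#     def train(batch_size):
#         dataloader = make_dataloader(batch_size)
#         for batch in dataloader:
#             training_step(batch)
#
#     train()  # tries 256, 128, 64, ... until a batch size fits in memory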
| 314
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        """
        Mask token, to use when training a model with masked-language modeling. Logs an error if used while
        not having been set.
        """
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
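
# Hedged usage sketch (not part of the original module): load the fast
# tokenizer from the Hub (network access assumed) and encode a sentence.
#
#     tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
#     encoded = tokenizer("Hello world", return_tensors="pt")
#     print(encoded["input_ids"])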
| 36
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 314
| 0
|
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """
    Adjacency List type Graph Data Structure that accounts for directed and
    undirected Graphs.
    """

    def __init__(self, directed: bool = True) -> None:
        """
        Parameters:
            directed: (bool) indicates if graph is directed or undirected. Default is True.
        """
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex, then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self
def __repr__( self ) -> str:
return pformat(self.adj_list )
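
# Hedged usage sketch (not in the original file): build a small directed graph
# and rely on __repr__ (pformat) for display.
#
#     graph = GraphAdjacencyList[int]()
#     graph.add_edge(0, 1)
#     graph.add_edge(1, 2)
#     print(graph)  # {0: [1], 1: [2], 2: []}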
| 37
|
def reverse_words(input_str: str) -> str:
    """
    Reverses words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314
| 0
|
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 38
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name)
            )
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name)
            )
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2]
                )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed
            )

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file
    )
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file
    )
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file
    )
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file
    )
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
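
# Hedged usage sketch (not part of the original module): download MNIST into a
# placeholder directory and draw one mini-batch.
#
#     datasets = read_data_sets("/tmp/mnist_data", one_hot=True)
#     images, labels = datasets.train.next_batch(64)
#     print(images.shape, labels.shape)  # (64, 784) (64, 10)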
| 314
| 0
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
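
# Hedged usage sketch (not part of the original file): in the example QA
# scripts this trainer is wired up roughly as follows; all names besides the
# class itself are placeholders from the surrounding script.
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         tokenizer=tokenizer,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )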
| 39
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")


def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type"
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples"
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples"
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true"
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating."
    )
    parser.add_argument("--print_docs", action="store_true", help="If True, prints docs retried while generating.")
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
    main(args)
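# Illustrative end-to-end invocation (flag names come from the parser above;
# paths and the checkpoint id are placeholders):
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/questions.txt \
#       --gold_data_path path/to/gold_answers.txt \
#       --predictions_path predictions.txt \
#       --eval_batch_size 8 --num_beams 4 --max_length 50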
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """
    Decorator that calls accelerate's `_hf_hook.pre_forward` hook (when one is
    attached to the module) before running the wrapped method.
    """
    if not is_accelerate_available():
        return method

    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
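# A minimal sketch of applying the decorator above. The toy class here is
# illustrative (not from this file); real usage is on diffusers model
# `forward` methods, where accelerate attaches `_hf_hook` during offloading.
class _ToyBlock:
    def _raw_forward(self, x):
        return x * 2

    forward = apply_forward_hook(_raw_forward)


_block = _ToyBlock()
assert _block.forward(3) == 6  # no _hf_hook attached, so this behaves like the raw method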
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=7 , __lowerCamelCase : Any=3 , __lowerCamelCase : Any=30 , __lowerCamelCase : str=400 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[int]=[0.5, 0.5, 0.5] , __lowerCamelCase : Tuple=[0.5, 0.5, 0.5] , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=1 / 255 , __lowerCamelCase : Dict=True , ) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = min_resolution
SCREAMING_SNAKE_CASE__ = max_resolution
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean
SCREAMING_SNAKE_CASE__ = image_std
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor
SCREAMING_SNAKE_CASE__ = do_pad
def lowercase_ ( self : Tuple ) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=False ) -> Optional[int]:
if not batched:
SCREAMING_SNAKE_CASE__ = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image.size
else:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE__ = int(self.size['''shortest_edge'''] * h / w )
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
elif w > h:
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE__ = int(self.size['''shortest_edge'''] * w / h )
else:
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
else:
SCREAMING_SNAKE_CASE__ = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = YolosImageProcessor if is_vision_available() else None
def lowercase_ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = YolosImageProcessingTester(self )
@property
def lowercase_ ( self : Tuple ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
def lowercase_ ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCamelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def lowercase_ ( self : Tuple ) -> Optional[int]:
pass
def lowercase_ ( self : int ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Tuple ) -> str:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Dict ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : List[str] ) -> Optional[Any]:
# Initialize image_processings
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE__ = self.image_processing_class(do_resize=__lowerCamelCase , do_normalize=__lowerCamelCase , do_rescale=__lowerCamelCase )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
SCREAMING_SNAKE_CASE__ = image_processing_a.pad(__lowerCamelCase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ = image_processing_a(__lowerCamelCase , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def lowercase_ ( self : Union[str, Any] ) -> Optional[int]:
# prepare image and target
SCREAMING_SNAKE_CASE__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
SCREAMING_SNAKE_CASE__ = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
SCREAMING_SNAKE_CASE__ = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCamelCase ) )
# verify orig_size
SCREAMING_SNAKE_CASE__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCamelCase ) )
@slow
def lowercase_ ( self : Optional[Any] ) -> Optional[Any]:
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
SCREAMING_SNAKE_CASE__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
SCREAMING_SNAKE_CASE__ = YolosImageProcessor(format='''coco_panoptic''' )
SCREAMING_SNAKE_CASE__ = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCamelCase ) )
# verify masks
SCREAMING_SNAKE_CASE__ = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCamelCase )
# verify orig_size
SCREAMING_SNAKE_CASE__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCamelCase ) )
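# These cases mirror the upstream YOLOS image-processor tests; to run a local
# copy of this file (path and command are illustrative):
#   python -m pytest test_image_processing_yolos.py -k "coco" -x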
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
_A : Optional[int] ='''hf-internal-testing/tiny-random-bert'''
_A : Union[str, Any] =os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
_A : Optional[Any] ='''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class _lowercase ( unittest.TestCase ):
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Dict = cached_file(UpperCamelCase__ , UpperCamelCase__ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(UpperCamelCase__ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) )
with open(os.path.join(UpperCamelCase__ , """refs""" , """main""" ) ) as f:
lowerCamelCase__ : int = f.read()
self.assertEqual(UpperCamelCase__ , os.path.join(UpperCamelCase__ , """snapshots""" , UpperCamelCase__ , UpperCamelCase__ ) )
self.assertTrue(os.path.isfile(UpperCamelCase__ ) )
# File is cached at the same place the second time.
lowerCamelCase__ : Union[str, Any] = cached_file(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# Using a specific revision to test the full commit hash.
lowerCamelCase__ : str = cached_file(UpperCamelCase__ , UpperCamelCase__ , revision="""9b8c223""" )
self.assertEqual(UpperCamelCase__ , os.path.join(UpperCamelCase__ , """snapshots""" , UpperCamelCase__ , UpperCamelCase__ ) )
def lowerCamelCase_ ( self: List[Any] ):
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid model identifier""" ):
lowerCamelCase__ : Tuple = cached_file("""tiny-random-bert""" , UpperCamelCase__ )
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid git identifier""" ):
lowerCamelCase__ : List[str] = cached_file(UpperCamelCase__ , UpperCamelCase__ , revision="""aaaa""" )
with self.assertRaisesRegex(UpperCamelCase__ , """does not appear to have a file named""" ):
lowerCamelCase__ : str = cached_file(UpperCamelCase__ , """conf""" )
def lowerCamelCase_ ( self: Optional[int] ):
with self.assertRaisesRegex(UpperCamelCase__ , """does not appear to have a file named""" ):
lowerCamelCase__ : Any = cached_file(UpperCamelCase__ , """conf""" )
with open(os.path.join(UpperCamelCase__ , """refs""" , """main""" ) ) as f:
lowerCamelCase__ : Optional[int] = f.read()
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , """.no_exist""" , UpperCamelCase__ , """conf""" ) ) )
lowerCamelCase__ : Optional[Any] = cached_file(UpperCamelCase__ , """conf""" , _raise_exceptions_for_missing_entries=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
lowerCamelCase__ : Dict = cached_file(UpperCamelCase__ , """conf""" , local_files_only=UpperCamelCase__ , _raise_exceptions_for_missing_entries=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = mock.Mock()
lowerCamelCase__ : str = 500
lowerCamelCase__ : List[str] = {}
lowerCamelCase__ : Union[str, Any] = HTTPError
lowerCamelCase__ : List[str] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=UpperCamelCase__ ) as mock_head:
lowerCamelCase__ : List[Any] = cached_file(UpperCamelCase__ , """conf""" , _raise_exceptions_for_connection_errors=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
            # This checks that we did call the fake head request
mock_head.assert_called()
def lowerCamelCase_ ( self: Dict ):
self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCamelCase__ ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCamelCase__ ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCamelCase__ ) )
def lowerCamelCase_ ( self: Optional[Any] ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid model identifier""" ):
get_file_from_repo("""bert-base-case""" , UpperCamelCase__ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid git identifier""" ):
get_file_from_repo("""bert-base-cased""" , UpperCamelCase__ , revision="""ahaha""" )
lowerCamelCase__ : Tuple = get_file_from_repo("""bert-base-cased""" , UpperCamelCase__ )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowerCamelCase__ : str = json.loads(open(UpperCamelCase__ , """r""" ).read() )
self.assertEqual(config["""hidden_size"""] , 768 )
def lowerCamelCase_ ( self: List[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ : int = Path(UpperCamelCase__ ) / """a.txt"""
filename.touch()
self.assertEqual(get_file_from_repo(UpperCamelCase__ , """a.txt""" ) , str(UpperCamelCase__ ) )
self.assertIsNone(get_file_from_repo(UpperCamelCase__ , """b.txt""" ) )
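# Quick illustration of the hub utilities exercised above (first call needs
# network access; the repo id is the same tiny test model used in the tests):
#   from transformers.utils import cached_file, get_file_from_repo, has_file
#   path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")  # local cache path
#   has_file("hf-internal-testing/tiny-random-bert", "config.json")            # True
#   get_file_from_repo("hf-internal-testing/tiny-random-bert", "missing.txt")  # None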
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    """Configuration for the RoBERTa-PreLayerNorm model."""

    model_type = "roberta-prelayernorm"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
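# Minimal usage sketch of the two classes above:
#   config = RobertaPreLayerNormConfig(vocab_size=50265, hidden_size=768)
#   onnx_config = RobertaPreLayerNormOnnxConfig(config)
#   onnx_config.inputs  # OrderedDict([('input_ids', {...}), ('attention_mask', {...})])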
'''simple docstring'''
class EditDistance:
    """
    Dynamic-programming edit distance, solved both top-down (memoized
    recursion) and bottom-up (tabulation).
    """

    def __init__(self):
        self.word1 = ''
        self.word2 = ''
        self.dp = []

    def __min_dist_top_down_dp(self, m, n):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1, word2):
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1, word2):
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()
    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()
    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()
    print()
    print(f'''The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}''')
    print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}''')
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(self, vocab_size=30522, hidden_size=768, num_attention_heads=12, num_qa_labels=9500, num_object_labels=1600, num_attr_labels=400, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, l_layers=9, x_layers=5, r_layers=5, visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
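# Minimal usage sketch of the configuration above:
#   config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
#   config.num_hidden_layers  # {'vision': 5, 'cross_encoder': 5, 'language': 9}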
from __future__ import annotations


class Node:
    """
    A Node has a data variable and pointers to Nodes to its left and right.
    """

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node | None) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print('''Tree is: ''')
    display(tree)


if __name__ == "__main__":
    main()
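# Expected behavior for the tree built in main():
#   is_full_binary_tree(tree) -> False  (e.g. node 5 has a left child but no right child)
#   depth_of_tree(tree)       -> 5      (longest path is 1 -> 3 -> 7 -> 8 -> 9)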
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : str = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
_SCREAMING_SNAKE_CASE : Dict = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
_SCREAMING_SNAKE_CASE : Optional[int] = {
'''vinai/phobert-base''': 256,
'''vinai/phobert-large''': 256,
}
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = set()
SCREAMING_SNAKE_CASE__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ = char
SCREAMING_SNAKE_CASE__ = set(_A )
return pairs
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, merges_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding='''utf-8''') as merges_handle:
            merges = merges_handle.read().split('''\n''')[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
def lowercase_ ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase_ ( self : Dict ) -> str:
return len(self.encoder )
def lowercase_ ( self : List[Any] ) -> str:
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ ( self : Any , __lowerCamelCase : Any ) -> Any:
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE__ = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
SCREAMING_SNAKE_CASE__ = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE__ = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = bigram
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
while i < len(__lowerCamelCase ):
try:
SCREAMING_SNAKE_CASE__ = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE__ = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE__ = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = new_word
if len(__lowerCamelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE__ = get_pairs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''@@ '''.join(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = word[:-4]
SCREAMING_SNAKE_CASE__ = word
return word
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = re.findall(r'''\S+\n?''' , __lowerCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(__lowerCamelCase ).split(''' ''' ) ) )
return split_tokens
def lowercase_ ( self : str , __lowerCamelCase : Optional[int] ) -> Optional[int]:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[str] ) -> Dict:
return self.decoder.get(__lowerCamelCase , self.unk_token )
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = ''' '''.join(__lowerCamelCase ).replace('''@@ ''' , '''''' ).strip()
return out_string
def lowercase_ ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE__ = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
if os.path.abspath(self.merges_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.merges_file , __lowerCamelCase )
return out_vocab_file, out_merge_file
    def add_from_file(self, f) -> None:
        """
        Load a pre-existing dictionary from a text file (or an open file
        handle) and add its symbols to this tokenizer's vocabulary.
        """
        if isinstance(f, str):
            try:
                with open(f, '''r''', encoding='''utf-8''') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''')
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(''' ''')
            if idx == -1:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''')
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
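# Typical usage of this tokenizer via its upstream name, PhobertTokenizer
# (downloads vocab.txt and bpe.codes from the hub on first call):
#   from transformers import PhobertTokenizer
#   tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
#   tokenizer.tokenize("Chúng tôi là những nghiên cứu viên .")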
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_a : List[Any] = logging.get_logger(__name__)
# General docstring
_a : Optional[int] = 'RegNetConfig'
# Base docstring
_a : Union[str, Any] = 'facebook/regnet-y-040'
_a : Tuple = [1, 1_088, 7, 7]
# Image classification docstring
_a : Tuple = 'facebook/regnet-y-040'
_a : Union[str, Any] = 'tabby, tabby cat'
_a : Union[str, Any] = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __A ( nn.Module ):
def __init__( self , a__ , a__ , a__ = 3 , a__ = 1 , a__ = 1 , a__ = "relu" , ):
super().__init__()
_lowerCAmelCase : str = nn.Convad(
a__ , a__ , kernel_size=a__ , stride=a__ , padding=kernel_size // 2 , groups=a__ , bias=a__ , )
_lowerCAmelCase : str = nn.BatchNormad(a__ )
_lowerCAmelCase : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def __A ( self , a__ ):
_lowerCAmelCase : List[str] = self.convolution(a__ )
_lowerCAmelCase : Union[str, Any] = self.normalization(a__ )
_lowerCAmelCase : List[Any] = self.activation(a__ )
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : Optional[int] = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
_lowerCAmelCase : List[Any] = config.num_channels
def __A ( self , a__ ):
_lowerCAmelCase : Tuple = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
_lowerCAmelCase : List[Any] = self.embedder(a__ )
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ , a__ , a__ = 2 ):
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Convad(a__ , a__ , kernel_size=1 , stride=a__ , bias=a__ )
_lowerCAmelCase : Optional[int] = nn.BatchNormad(a__ )
def __A ( self , a__ ):
_lowerCAmelCase : str = self.convolution(a__ )
_lowerCAmelCase : Tuple = self.normalization(a__ )
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ , a__ ):
super().__init__()
_lowerCAmelCase : Dict = nn.AdaptiveAvgPoolad((1, 1) )
_lowerCAmelCase : Tuple = nn.Sequential(
nn.Convad(a__ , a__ , kernel_size=1 ) , nn.ReLU() , nn.Convad(a__ , a__ , kernel_size=1 ) , nn.Sigmoid() , )
def __A ( self , a__ ):
# b c h w -> b c 1 1
_lowerCAmelCase : Tuple = self.pooler(a__ )
_lowerCAmelCase : int = self.attention(a__ )
_lowerCAmelCase : Optional[Any] = hidden_state * attention
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ , a__ , a__ , a__ = 1 ):
super().__init__()
_lowerCAmelCase : Optional[Any] = in_channels != out_channels or stride != 1
_lowerCAmelCase : Optional[Any] = max(1 , out_channels // config.groups_width )
_lowerCAmelCase : Tuple = (
RegNetShortCut(a__ , a__ , stride=a__ ) if should_apply_shortcut else nn.Identity()
)
_lowerCAmelCase : str = nn.Sequential(
RegNetConvLayer(a__ , a__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(a__ , a__ , stride=a__ , groups=a__ , activation=config.hidden_act ) , RegNetConvLayer(a__ , a__ , kernel_size=1 , activation=a__ ) , )
_lowerCAmelCase : Union[str, Any] = ACTaFN[config.hidden_act]
def __A ( self , a__ ):
_lowerCAmelCase : Any = hidden_state
_lowerCAmelCase : Any = self.layer(a__ )
_lowerCAmelCase : str = self.shortcut(a__ )
hidden_state += residual
_lowerCAmelCase : int = self.activation(a__ )
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ , a__ , a__ , a__ = 1 ):
super().__init__()
_lowerCAmelCase : Dict = in_channels != out_channels or stride != 1
_lowerCAmelCase : List[Any] = max(1 , out_channels // config.groups_width )
_lowerCAmelCase : List[Any] = (
RegNetShortCut(a__ , a__ , stride=a__ ) if should_apply_shortcut else nn.Identity()
)
_lowerCAmelCase : Tuple = nn.Sequential(
RegNetConvLayer(a__ , a__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(a__ , a__ , stride=a__ , groups=a__ , activation=config.hidden_act ) , RegNetSELayer(a__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(a__ , a__ , kernel_size=1 , activation=a__ ) , )
_lowerCAmelCase : Dict = ACTaFN[config.hidden_act]
def __A ( self , a__ ):
_lowerCAmelCase : List[str] = hidden_state
_lowerCAmelCase : Optional[int] = self.layer(a__ )
_lowerCAmelCase : List[Any] = self.shortcut(a__ )
hidden_state += residual
_lowerCAmelCase : List[str] = self.activation(a__ )
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ , a__ , a__ , a__ = 2 , a__ = 2 , ):
super().__init__()
_lowerCAmelCase : List[Any] = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
_lowerCAmelCase : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
a__ , a__ , a__ , stride=a__ , ) , *[layer(a__ , a__ , a__ ) for _ in range(depth - 1 )] , )
def __A ( self , a__ ):
_lowerCAmelCase : List[Any] = self.layers(a__ )
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : Optional[int] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
a__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_lowerCAmelCase : List[str] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(a__ , config.depths[1:] ):
self.stages.append(RegNetStage(a__ , a__ , a__ , depth=a__ ) )
def __A ( self , a__ , a__ = False , a__ = True ):
_lowerCAmelCase : Any = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_lowerCAmelCase : Union[str, Any] = hidden_states + (hidden_state,)
_lowerCAmelCase : Union[str, Any] = stage_module(a__ )
if output_hidden_states:
_lowerCAmelCase : Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=a__ , hidden_states=a__ )
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = RegNetConfig
_UpperCamelCase : int = "regnet"
_UpperCamelCase : List[str] = "pixel_values"
_UpperCamelCase : Union[str, Any] = True
def __A ( self , a__ ):
if isinstance(a__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(a__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def __A ( self , a__ , a__=False ):
if isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = value
_a : List[Any] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
_a : List[Any] = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : List[Any] = config
_lowerCAmelCase : Any = RegNetEmbeddings(a__ )
_lowerCAmelCase : List[str] = RegNetEncoder(a__ )
_lowerCAmelCase : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __A ( self , a__ , a__ = None , a__ = None ):
_lowerCAmelCase : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : List[Any] = self.embedder(a__ )
_lowerCAmelCase : Optional[Any] = self.encoder(
a__ , output_hidden_states=a__ , return_dict=a__ )
_lowerCAmelCase : str = encoder_outputs[0]
_lowerCAmelCase : Optional[Any] = self.pooler(a__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a__ , pooler_output=a__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , SCREAMING_SNAKE_CASE_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : Any = config.num_labels
_lowerCAmelCase : Optional[int] = RegNetModel(a__ )
# classification head
_lowerCAmelCase : Optional[Any] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __A ( self , a__ = None , a__ = None , a__ = None , a__ = None , ):
_lowerCAmelCase : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : Union[str, Any] = self.regnet(a__ , output_hidden_states=a__ , return_dict=a__ )
_lowerCAmelCase : str = outputs.pooler_output if return_dict else outputs[1]
_lowerCAmelCase : str = self.classifier(a__ )
_lowerCAmelCase : List[str] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_lowerCAmelCase : str = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_lowerCAmelCase : Optional[int] = """single_label_classification"""
else:
_lowerCAmelCase : Optional[int] = """multi_label_classification"""
if self.config.problem_type == "regression":
_lowerCAmelCase : Optional[Any] = MSELoss()
if self.num_labels == 1:
_lowerCAmelCase : int = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_lowerCAmelCase : Dict = loss_fct(a__ , a__ )
elif self.config.problem_type == "single_label_classification":
_lowerCAmelCase : Any = CrossEntropyLoss()
_lowerCAmelCase : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_lowerCAmelCase : Dict = BCEWithLogitsLoss()
_lowerCAmelCase : Tuple = loss_fct(a__ , a__ )
if not return_dict:
_lowerCAmelCase : Optional[int] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=a__ , logits=a__ , hidden_states=outputs.hidden_states )
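# Minimal inference sketch using the upstream equivalents of the classes
# above (checkpoint id taken from the docstring constants; `image` is any
# PIL image):
#   from transformers import AutoImageProcessor, RegNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # shape (1, num_labels)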
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    '''
    Find the thirteen adjacent digits in the 1000-digit number N that have
    the greatest product and return that product.
    '''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(F"{solution() = }")
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def lowercase ( lowerCAmelCase__ : int ) -> Optional[int]:
# vision encoder
if "img_encoder.pos_embed" in name:
__a = name.replace('''img_encoder.pos_embed''' , '''vision_model.embeddings.position_embeddings''' )
if "img_encoder.patch_embed.proj" in name:
__a = name.replace('''img_encoder.patch_embed.proj''' , '''vision_model.embeddings.patch_embeddings.projection''' )
if "img_encoder.patch_embed.norm" in name:
__a = name.replace('''img_encoder.patch_embed.norm''' , '''vision_model.embeddings.layernorm''' )
if "img_encoder.layers" in name:
__a = name.replace('''img_encoder.layers''' , '''vision_model.encoder.stages''' )
if "blocks" in name and "res" not in name:
__a = name.replace('''blocks''' , '''layers''' )
if "attn" in name and "pre_assign" not in name:
__a = name.replace('''attn''' , '''self_attn''' )
if "proj" in name and "self_attn" in name and "text" not in name:
__a = name.replace('''proj''' , '''out_proj''' )
if "pre_assign_attn.attn.proj" in name:
__a = name.replace('''pre_assign_attn.attn.proj''' , '''pre_assign_attn.attn.out_proj''' )
if "norm1" in name:
__a = name.replace('''norm1''' , '''layer_norm1''' )
if "norm2" in name and "pre_assign" not in name:
__a = name.replace('''norm2''' , '''layer_norm2''' )
if "img_encoder.norm" in name:
__a = name.replace('''img_encoder.norm''' , '''vision_model.layernorm''' )
# text encoder
if "text_encoder.token_embedding" in name:
__a = name.replace('''text_encoder.token_embedding''' , '''text_model.embeddings.token_embedding''' )
if "text_encoder.positional_embedding" in name:
__a = name.replace('''text_encoder.positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "text_encoder.transformer.resblocks." in name:
__a = name.replace('''text_encoder.transformer.resblocks.''' , '''text_model.encoder.layers.''' )
if "ln_1" in name:
__a = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
__a = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
__a = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
__a = name.replace('''c_proj''' , '''fc2''' )
if "text_encoder" in name:
__a = name.replace('''text_encoder''' , '''text_model''' )
if "ln_final" in name:
__a = name.replace('''ln_final''' , '''final_layer_norm''' )
# projection layers
if "img_projector.linear_hidden." in name:
__a = name.replace('''img_projector.linear_hidden.''' , '''visual_projection.''' )
if "img_projector.linear_out." in name:
__a = name.replace('''img_projector.linear_out.''' , '''visual_projection.3.''' )
if "text_projector.linear_hidden" in name:
__a = name.replace('''text_projector.linear_hidden''' , '''text_projection''' )
if "text_projector.linear_out" in name:
__a = name.replace('''text_projector.linear_out''' , '''text_projection.3''' )
return name
def lowercase ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] ) -> int:
for key in orig_state_dict.copy().keys():
__a = orig_state_dict.pop(lowerCAmelCase__ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__a = key.split('''.''' )
__a , __a = int(key_split[2] ), int(key_split[4] )
__a = config.vision_config.hidden_size
if "weight" in key:
__a = val[:dim, :]
__a = val[dim : dim * 2, :]
__a = val[-dim:, :]
else:
__a = val[:dim]
__a = val[dim : dim * 2]
__a = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__a = key.split('''.''' )
__a = int(key_split[3] )
__a = config.text_config.hidden_size
if "weight" in key:
__a = val[:dim, :]
__a = val[
dim : dim * 2, :
]
__a = val[-dim:, :]
else:
__a = val[:dim]
__a = val[dim : dim * 2]
__a = val[-dim:]
else:
__a = rename_key(lowerCAmelCase__ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
__a = val.squeeze_()
else:
__a = val
return orig_state_dict
def lowercase ( ) -> Tuple:
__a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__a = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
lowercase_ = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
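# Example invocation (the script filename and checkpoint path are illustrative; use
# whichever GroupViT checkpoint you downloaded):
#
#     python convert_groupvit_checkpoint.py \
#         --checkpoint_path ./group_vit_gcc_yfcc_30e.pth \
#         --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#         --model_name groupvit-gcc-yfcc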
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation using DDIM sampling."""

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
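# A minimal usage sketch (the checkpoint id is illustrative; any UNet2DModel trained
# with a DDPM/DDIM objective works here):
#
#     from diffusers import UNet2DModel, DDIMScheduler
#
#     unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#     scheduler = DDIMScheduler()
#     pipe = DDIMPipeline(unet=unet, scheduler=scheduler)
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]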
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    """Collapse `name.N` segments (e.g. `layers.0`) into `name_N` so the key matches Flax naming."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
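# A worked case of the regex above:
#     rename_key("down_blocks.0.attentions.1.weight") == "down_blocks_0.attentions_1.weight"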
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
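# A minimal usage sketch (names are illustrative; any PyTorch model with a Flax
# counterpart exposing `init_weights` fits this signature):
#
#     pt_state_dict = pt_model.state_dict()
#     flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)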
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    """Configuration class for TAPAS models."""

    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
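# A minimal configuration sketch (values are illustrative, not a released fine-tuning
# recipe): enabling weakly supervised aggregation only requires flipping the task
# hyperparameters above.
#
#     config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)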
"""Nearest-neighbour image scaling."""
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        """Fill every destination pixel from its nearest source pixel."""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column index to the nearest source column index."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row index to the nearest source row index."""
        return int(self.ratio_y * y)
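# Worked example of the index mapping above: scaling a 1024x768 source to 800x600
# gives ratio_x = 1024 / 800 = 1.28, so destination column 799 samples source column
# int(1.28 * 799) = 1022, which stays safely inside the 0..1023 range.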
if __name__ == "__main__":
lowerCamelCase , lowerCamelCase : Optional[Any] = 8_0_0, 6_0_0
lowerCamelCase : str = imread("image_data/lena.jpg", 1)
lowerCamelCase : Optional[int] = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        # Remote-code prompts should time out immediately in tests.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")
    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)

    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)
    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")

    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new tokenizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")

    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """Language generation pipeline using any model with a causal language modeling head."""

    # Prefix text used to give XLNet and Transformer-XL more state on short prompts.
    XL_PREFIX = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__="" , UpperCamelCase__=None , **UpperCamelCase__ ) -> str:
lowerCamelCase : Union[str, Any] = self.tokenizer(
prefix + prompt_text , padding=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors=self.framework )
lowerCamelCase : int = prompt_text
if handle_long_generation == "hole":
lowerCamelCase : Any = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCamelCase : Union[str, Any] = generate_kwargs["max_new_tokens"]
else:
lowerCamelCase : Any = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCamelCase : List[str] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
lowerCamelCase : List[str] = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
lowerCamelCase : Optional[int] = inputs["attention_mask"][:, -keep_length:]
return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
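# A minimal usage sketch via the high-level factory (the model id is illustrative):
#
#     from transformers import pipeline
#
#     generator = pipeline("text-generation", model="gpt2")
#     print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])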
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )

    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
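# For reference, shards written this way can be parsed back with tf.data; a sketch
# (the shard filename is illustrative) whose feature spec mirrors the features
# serialized above:
#
#     feature_spec = {
#         "input_ids": tf.io.VarLenFeature(tf.int64),
#         "attention_mask": tf.io.VarLenFeature(tf.int64),
#     }
#     ds = tf.data.TFRecordDataset(["dataset-0-1000.tfrecord"]).map(
#         lambda rec: tf.io.parse_single_example(rec, feature_spec)
#     )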
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
__snake_case :List[str] = parse_args()
main(args)
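# Example invocation (the script filename and output path are illustrative):
#
#     python prepare_tfrecord_shards.py \
#         --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
#         --split train --shard_size 1000 --output_dir gs://tf-tpu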
from ... import PretrainedConfig

NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    """Configuration class for NEZHA models."""

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
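# A minimal sketch; the defaults above correspond to the Chinese base checkpoint
# referenced in NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP:
#
#     config = NezhaConfig()
#     assert config.max_relative_position == 64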
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)
    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
def A_ ( self : str ) -> List[str]:
lowerCamelCase__ : Union[str, Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase ) , np.squeeze(UpperCAmelCase ) ) )
lowerCamelCase__ : Any = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase , axis=2 ) , np.squeeze(UpperCAmelCase , axis=2 ) ) )
@require_torch
def A_ ( self : List[str] ) -> List[str]:
lowerCamelCase__ : Tuple = np.random.randn(1 , 3 , 4 )
lowerCamelCase__ : Union[str, Any] = torch.tensor(UpperCAmelCase )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase ) , squeeze(UpperCAmelCase ).numpy() ) )
lowerCamelCase__ : str = np.random.randn(1 , 4 , 1 , 5 )
lowerCamelCase__ : Optional[Any] = torch.tensor(UpperCAmelCase )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase , axis=2 ) , squeeze(UpperCAmelCase , axis=2 ).numpy() ) )
@require_tf
def A_ ( self : Optional[int] ) -> List[str]:
lowerCamelCase__ : str = np.random.randn(1 , 3 , 4 )
lowerCamelCase__ : Optional[int] = tf.constant(UpperCAmelCase )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase ) , squeeze(UpperCAmelCase ).numpy() ) )
lowerCamelCase__ : List[str] = np.random.randn(1 , 4 , 1 , 5 )
lowerCamelCase__ : Optional[Any] = tf.constant(UpperCAmelCase )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase , axis=2 ) , squeeze(UpperCAmelCase , axis=2 ).numpy() ) )
@require_flax
def A_ ( self : Optional[Any] ) -> List[Any]:
lowerCamelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCamelCase__ : Optional[Any] = jnp.array(UpperCAmelCase )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase ) , np.asarray(squeeze(UpperCAmelCase ) ) ) )
lowerCamelCase__ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
lowerCamelCase__ : Tuple = jnp.array(UpperCAmelCase )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase , axis=2 ) , np.asarray(squeeze(UpperCAmelCase , axis=2 ) ) ) )
def A_ ( self : Dict ) -> Optional[Any]:
lowerCamelCase__ : Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(UpperCAmelCase , axis=1 ) , np.expand_dims(UpperCAmelCase , axis=1 ) ) )
@require_torch
def A_ ( self : Optional[Any] ) -> List[Any]:
lowerCamelCase__ : Any = np.random.randn(3 , 4 )
lowerCamelCase__ : Optional[int] = torch.tensor(UpperCAmelCase )
self.assertTrue(np.allclose(expand_dims(UpperCAmelCase , axis=1 ) , expand_dims(UpperCAmelCase , axis=1 ).numpy() ) )
@require_tf
def A_ ( self : int ) -> Optional[int]:
lowerCamelCase__ : Optional[int] = np.random.randn(3 , 4 )
lowerCamelCase__ : Any = tf.constant(UpperCAmelCase )
self.assertTrue(np.allclose(expand_dims(UpperCAmelCase , axis=1 ) , expand_dims(UpperCAmelCase , axis=1 ).numpy() ) )
@require_flax
def A_ ( self : Any ) -> Optional[Any]:
lowerCamelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCamelCase__ : Optional[Any] = jnp.array(UpperCAmelCase )
self.assertTrue(np.allclose(expand_dims(UpperCAmelCase , axis=1 ) , np.asarray(expand_dims(UpperCAmelCase , axis=1 ) ) ) )
| 50
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE : Dict = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
_SCREAMING_SNAKE_CASE : List[str] = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
_SCREAMING_SNAKE_CASE : List[str] = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = PRETRAINED_INIT_CONFIGURATION
a = RoFormerTokenizer
def __init__( self : Tuple , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Any=None , __lowerCamelCase : str=True , __lowerCamelCase : Tuple="[UNK]" , __lowerCamelCase : int="[SEP]" , __lowerCamelCase : Union[str, Any]="[PAD]" , __lowerCamelCase : Optional[int]="[CLS]" , __lowerCamelCase : int="[MASK]" , __lowerCamelCase : int=True , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : Dict , ) -> Dict:
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , __lowerCamelCase ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , __lowerCamelCase ) != strip_accents
):
SCREAMING_SNAKE_CASE__ = getattr(__lowerCamelCase , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE__ = do_lower_case
SCREAMING_SNAKE_CASE__ = strip_accents
SCREAMING_SNAKE_CASE__ = pre_tok_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = do_lower_case
def __getstate__( self : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ = BertPreTokenizer()
return state
def __setstate__( self : int , __lowerCamelCase : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = d
SCREAMING_SNAKE_CASE__ = self.__dict__['''_tokenizer'''].get_vocab()
SCREAMING_SNAKE_CASE__ = PreTokenizer.custom(JiebaPreTokenizer(__lowerCamelCase ) )
def lowercase_ ( self : int , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase_ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
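    # Explanatory note (added): the returned mask is 0 for the first sequence,
    # including [CLS] and its [SEP], and 1 for the optional second sequence plus
    # its trailing [SEP], matching the standard BERT segment convention.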
def lowercase_ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE__ = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def lowercase_ ( self : str , __lowerCamelCase : int , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=False , **__lowerCamelCase : Tuple , ) -> int:
SCREAMING_SNAKE_CASE__ = BertPreTokenizer()
return super().save_pretrained(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
| 314
| 0
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
snake_case_ : Optional[Any] = "scheduler_config.json"
class __snake_case ( a ):
UpperCAmelCase__ : List[str] = 1
UpperCAmelCase__ : str = 2
UpperCAmelCase__ : Tuple = 3
UpperCAmelCase__ : Union[str, Any] = 4
UpperCAmelCase__ : Tuple = 5
@dataclass
class __snake_case ( a ):
UpperCAmelCase__ : jnp.ndarray
class __snake_case :
UpperCAmelCase__ : Tuple = SCHEDULER_CONFIG_NAME
UpperCAmelCase__ : Tuple = ['''dtype''']
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : List[str] = True
@classmethod
def lowerCamelCase ( cls : int , _snake_case : Dict[str, Any] = None , _snake_case : Optional[str] = None , _snake_case : Any=False , **_snake_case : List[Any] , ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = cls.load_config(
pretrained_model_name_or_path=_snake_case , subfolder=_snake_case , return_unused_kwargs=_snake_case , **_snake_case , )
UpperCAmelCase_ , UpperCAmelCase_ = cls.from_config(_snake_case , return_unused_kwargs=_snake_case , **_snake_case)
if hasattr(_snake_case , '''create_state''') and getattr(_snake_case , '''has_state''' , _snake_case):
UpperCAmelCase_ = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def lowerCamelCase ( self : Tuple , _snake_case : Union[str, os.PathLike] , _snake_case : bool = False , **_snake_case : str):
"""simple docstring"""
self.save_config(save_directory=_snake_case , push_to_hub=_snake_case , **_snake_case)
@property
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
return self._get_compatibles()
@classmethod
def lowerCamelCase ( cls : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = list(set([cls.__name__] + cls._compatibles))
UpperCAmelCase_ = importlib.import_module(__name__.split('''.''')[0])
UpperCAmelCase_ = [
getattr(_snake_case , _snake_case) for c in compatible_classes_str if hasattr(_snake_case , _snake_case)
]
return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    """Right-pad ``x`` with singleton axes so it broadcasts against ``shape``."""
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
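# Example (added note): this right-pads the shape of ``x`` with singleton axes,
# e.g. x of shape (B,) against a target shape (B, H, W, C) is reshaped to
# (B, 1, 1, 1) before jnp.broadcast_to, so per-timestep scalars broadcast over
# full sample tensors.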
def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    """Build the "squaredcos_cap_v2" (Glide cosine) beta schedule."""
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
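# Note (added): each entry above is beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i)
# with t_i = i / num_diffusion_timesteps, clipped at max_beta so that no single
# step removes all remaining signal.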
@flax.struct.dataclass
class __snake_case :
UpperCAmelCase__ : jnp.ndarray
UpperCAmelCase__ : jnp.ndarray
UpperCAmelCase__ : jnp.ndarray
@classmethod
def lowerCamelCase ( cls : Any , _snake_case : Dict):
"""simple docstring"""
UpperCAmelCase_ = scheduler.config
if config.trained_betas is not None:
UpperCAmelCase_ = jnp.asarray(config.trained_betas , dtype=scheduler.dtype)
elif config.beta_schedule == "linear":
UpperCAmelCase_ = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype)
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCAmelCase_ = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype)
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCAmelCase_ = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype)
else:
raise NotImplementedError(
F"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""")
UpperCAmelCase_ = 1.0 - betas
UpperCAmelCase_ = jnp.cumprod(_snake_case , axis=0)
return cls(
alphas=_snake_case , betas=_snake_case , alphas_cumprod=_snake_case , )
def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    """Broadcast sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t) to the sample shape."""
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    # function name reconstructed; the original identifier was mangled in this dump
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    # function name reconstructed; the original identifier was mangled in this dump
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
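# Explanatory note (added): add_noise_common implements the closed-form forward
# process q(x_t | x_0) = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# and get_velocity_common builds the v-prediction target
# v_t = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0.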
| 51
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "trajectory_transformer"
a = ["past_key_values"]
a = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Tuple , __lowerCamelCase : Any=100 , __lowerCamelCase : str=5 , __lowerCamelCase : int=1 , __lowerCamelCase : Tuple=1 , __lowerCamelCase : List[Any]=249 , __lowerCamelCase : List[str]=6 , __lowerCamelCase : Dict=17 , __lowerCamelCase : str=25 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Dict=128 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=0.0006 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : Tuple=1e-12 , __lowerCamelCase : Optional[int]=1 , __lowerCamelCase : Any=True , __lowerCamelCase : List[str]=1 , __lowerCamelCase : Tuple=5_0256 , __lowerCamelCase : Dict=5_0256 , **__lowerCamelCase : str , ) -> Dict:
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = action_weight
SCREAMING_SNAKE_CASE__ = reward_weight
SCREAMING_SNAKE_CASE__ = value_weight
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = block_size
SCREAMING_SNAKE_CASE__ = action_dim
SCREAMING_SNAKE_CASE__ = observation_dim
SCREAMING_SNAKE_CASE__ = transition_dim
SCREAMING_SNAKE_CASE__ = learning_rate
SCREAMING_SNAKE_CASE__ = n_layer
SCREAMING_SNAKE_CASE__ = n_head
SCREAMING_SNAKE_CASE__ = n_embd
SCREAMING_SNAKE_CASE__ = embd_pdrop
SCREAMING_SNAKE_CASE__ = attn_pdrop
SCREAMING_SNAKE_CASE__ = resid_pdrop
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = kaiming_initializer_range
SCREAMING_SNAKE_CASE__ = use_cache
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
| 314
| 0
|
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
__lowerCamelCase : Dict = logging.getLogger(__name__)
@dataclass
class A__ :
_UpperCAmelCase :str
_UpperCAmelCase :List[str]
_UpperCAmelCase :Optional[List[str]]
@dataclass
class A__ :
_UpperCAmelCase :List[int]
_UpperCAmelCase :List[int]
_UpperCAmelCase :Optional[List[int]] = None
_UpperCAmelCase :Optional[List[int]] = None
class A__ ( __snake_case ):
_UpperCAmelCase :List[str] = 'train'
_UpperCAmelCase :Dict = 'dev'
_UpperCAmelCase :str = 'test'
class A__ :
@staticmethod
def __UpperCamelCase( A_ , A_ ):
'''simple docstring'''
raise NotImplementedError
@staticmethod
def __UpperCamelCase( A_ ):
'''simple docstring'''
raise NotImplementedError
@staticmethod
def __UpperCamelCase( A_ , A_ , A_ , A_ , A_=False , A_="[CLS]" , A_=1 , A_="[SEP]" , A_=False , A_=False , A_=0 , A_=0 , A_=-100 , A_=0 , A_=True , ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = {label: i for i, label in enumerate(A_ )}
UpperCamelCase : List[str] = []
for ex_index, example in enumerate(A_ ):
if ex_index % 1_0000 == 0:
logger.info("Writing example %d of %d" , A_ , len(A_ ) )
UpperCamelCase : Optional[int] = []
UpperCamelCase : Optional[Any] = []
for word, label in zip(example.words , example.labels ):
UpperCamelCase : str = tokenizer.tokenize(A_ )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(A_ ) > 0:
tokens.extend(A_ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(A_ ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
UpperCamelCase : Tuple = tokenizer.num_special_tokens_to_add()
if len(A_ ) > max_seq_length - special_tokens_count:
UpperCamelCase : str = tokens[: (max_seq_length - special_tokens_count)]
UpperCamelCase : str = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
UpperCamelCase : Optional[int] = [sequence_a_segment_id] * len(A_ )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
UpperCamelCase : Optional[Any] = [cls_token] + tokens
UpperCamelCase : Optional[Any] = [pad_token_label_id] + label_ids
UpperCamelCase : Any = [cls_token_segment_id] + segment_ids
UpperCamelCase : Any = tokenizer.convert_tokens_to_ids(A_ )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
UpperCamelCase : List[str] = [1 if mask_padding_with_zero else 0] * len(A_ )
# Zero-pad up to the sequence length.
UpperCamelCase : Tuple = max_seq_length - len(A_ )
if pad_on_left:
UpperCamelCase : str = ([pad_token] * padding_length) + input_ids
UpperCamelCase : Dict = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
UpperCamelCase : Optional[Any] = ([pad_token_segment_id] * padding_length) + segment_ids
UpperCamelCase : int = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(A_ ) == max_seq_length
assert len(A_ ) == max_seq_length
assert len(A_ ) == max_seq_length
assert len(A_ ) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***" )
logger.info("guid: %s" , example.guid )
logger.info("tokens: %s" , " ".join([str(A_ ) for x in tokens] ) )
logger.info("input_ids: %s" , " ".join([str(A_ ) for x in input_ids] ) )
logger.info("input_mask: %s" , " ".join([str(A_ ) for x in input_mask] ) )
logger.info("segment_ids: %s" , " ".join([str(A_ ) for x in segment_ids] ) )
logger.info("label_ids: %s" , " ".join([str(A_ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
UpperCamelCase : Tuple = None
features.append(
InputFeatures(
input_ids=A_ , attention_mask=A_ , token_type_ids=A_ , label_ids=A_ ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class A__ ( __snake_case ):
_UpperCAmelCase :List[InputFeatures]
_UpperCAmelCase :int = nn.CrossEntropyLoss().ignore_index
def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ = None , A_=False , A_ = Split.train , ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = os.path.join(
A_ , "cached_{}_{}_{}".format(mode.value , tokenizer.__class__.__name__ , str(A_ ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCamelCase : List[Any] = cached_features_file + ".lock"
with FileLock(A_ ):
if os.path.exists(A_ ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
UpperCamelCase : Dict = torch.load(A_ )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
UpperCamelCase : List[Any] = token_classification_task.read_examples_from_file(A_ , A_ )
# TODO clean up all this to leverage built-in features of tokenizers
UpperCamelCase : int = token_classification_task.convert_examples_to_features(
A_ , A_ , A_ , A_ , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=A_ , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(F"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , A_ )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self , A_ ):
'''simple docstring'''
return self.features[i]
if is_tf_available():
import tensorflow as tf
class A__ :
_UpperCAmelCase :List[InputFeatures]
_UpperCAmelCase :int = -1_0_0
def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ = None , A_=False , A_ = Split.train , ):
'''simple docstring'''
UpperCamelCase : str = token_classification_task.read_examples_from_file(A_ , A_ )
# TODO clean up all this to leverage built-in features of tokenizers
UpperCamelCase : str = token_classification_task.convert_examples_to_features(
A_ , A_ , A_ , A_ , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=A_ , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
UpperCamelCase : Optional[int] = tf.data.Dataset.from_generator(
A_ , ({"input_ids": tf.intaa, "attention_mask": tf.intaa}, tf.intaa) , (
{"input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
UpperCamelCase : Optional[int] = tf.data.Dataset.from_generator(
A_ , ({"input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa}, tf.intaa) , (
{
"input_ids": tf.TensorShape([None] ),
"attention_mask": tf.TensorShape([None] ),
"token_type_ids": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self , A_ ):
'''simple docstring'''
return self.features[i]
| 52
|
def solution(limit: int = 1_00_00_00) -> int:
    '''Return the sum of Euler's totient phi(n) for 2 <= n <= limit.'''
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"{solution() = }")
| 314
| 0
|
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):  # name reconstructed; the original identifier was mangled
    """Decorator returning the wall-clock duration of a single call to ``func``."""
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    """Generate ``num_examples`` rows of random dummy data matching ``features``."""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = 'The small grey turtle was surprisingly fast when challenged.'
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    # name reconstructed; only ``generate_examples`` below is confirmed by its call site
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            F'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''' )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
| 53
|
import numpy as np
from PIL import Image
def maxpooling(arr, size, stride):
    '''Max pooling over a square matrix with the given kernel size and stride.'''
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr, size, stride):
    '''Average pooling over a square matrix with the given kernel size and stride.'''
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
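    # Deterministic example (added; 'path_to_image' above is a placeholder and
    # left as-is). For mat = [[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]:
    #   maxpooling(mat, size=2, stride=2) -> [[ 6.  8.] [14. 16.]]
    #   avgpooling(mat, size=2, stride=2) -> [[ 3.  5.] [11. 13.]]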
| 314
| 0
|
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    '''Per-process waiting time under shortest-remaining-time-first scheduling.'''
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    '''Turnaround time = burst time + waiting time for each process.'''
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time, turn_around_time, no_of_processes):
    '''Print the average waiting and turnaround times.'''
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
    print("Average turn around time =" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
    print('''Enter how many processes you want to analyze''')
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
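    # Worked example (added, traced by hand through the SRTF loop above):
    # arrival_time = [0, 1, 2] and burst_time = [5, 3, 2] yield
    # waiting_time = [5, 0, 2] and turn_around_time = [10, 3, 4]:
    # process 2 preempts process 1 at t=1, process 3 runs once process 2
    # finishes at t=4, and process 1 resumes at t=6 and finishes at t=10.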
| 54
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    '''Return every way ``target`` can be built by concatenating words from ``word_bank``.'''
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1
    table = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
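    # Reference case (added; a classic check for this tabulation):
    # all_construct("purple", ["purp", "p", "ur", "le", "purpl"]) returns
    # [["purp", "le"], ["p", "ur", "p", "le"]] -- every decomposition of the target.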
| 314
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=4 , ):
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_attention_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_choices
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_attention_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = True
lowerCamelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class snake_case ( lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = FlaxRobertaModelTester(self )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowerCamelCase_ = model_class_name.from_pretrained("roberta-base" , from_pt=UpperCamelCase )
lowerCamelCase_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase )
| 55
|
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    '''Scrape the current stock price for ``symbol`` from Yahoo Finance.'''
    url = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
    soup = BeautifulSoup(requests.get(url).text , '''html.parser''' )
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 314
| 0
|
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a ( _lowerCamelCase ):
def A_ ( self : Optional[int] ):
snake_case_ = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def A_ ( self : Optional[int] ):
with self.assertRaises(lowercase_ ):
snake_case_ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def A_ ( self : Dict ):
with self.assertRaises(lowercase_ ):
snake_case_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''bool''' ) , type=Value('''int64''' ) ) )
def A_ ( self : Union[str, Any] ):
snake_case_ = pa.array(TypedSequence([1, 2, 3] , type=Value('''int32''' ) ) )
self.assertEqual(arr.type , pa.intaa() )
def A_ ( self : Dict ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
snake_case_ = pa.array(TypedSequence(['''foo''', '''bar'''] , type=Value('''int64''' ) ) )
def A_ ( self : Union[str, Any] ):
snake_case_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''int32''' ) ) )
self.assertEqual(arr.type , pa.intaa() )
def A_ ( self : Dict ):
snake_case_ = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=Value('''int64''' ) ) )
self.assertEqual(arr.type , pa.string() )
def A_ ( self : Tuple ):
snake_case_ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , '''int64''' ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , '''int64''' ) )
def A_ ( self : Dict ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
snake_case_ = pa.array(TypedSequence(['''foo''', '''bar'''] , type=ArrayaD((1, 3) , '''int64''' ) ) )
def A_ ( self : Tuple ):
snake_case_ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , '''int64''' ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , '''int64''' ) )
def A_ ( self : Tuple ):
snake_case_ = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=ArrayaD((1, 3) , '''int64''' ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def A_ ( self : Optional[int] ):
import PIL.Image
snake_case_ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
'''datasets.arrow_writer.cast_to_python_objects''' , side_effect=lowercase_ ) as mock_cast_to_python_objects:
snake_case_ = pa.array(TypedSequence([{'''path''': None, '''bytes''': B'''image_bytes'''}, pil_image] , type=Image() ) )
snake_case_ ,snake_case_ = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('''optimize_list_casting''' , lowercase_ )
self.assertFalse(kwargs['''optimize_list_casting'''] )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
snake_case_ = pa.BufferReader(__UpperCAmelCase ) if isinstance(__UpperCAmelCase, pa.Buffer ) else pa.memory_map(__UpperCAmelCase )
snake_case_ = pa.ipc.open_stream(__UpperCAmelCase )
snake_case_ = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''', [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Any:
'''simple docstring'''
snake_case_ = pa.BufferOutputStream()
snake_case_ = pa.schema(__UpperCAmelCase ) if fields else None
with ArrowWriter(stream=__UpperCAmelCase, schema=__UpperCAmelCase, writer_batch_size=__UpperCAmelCase ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
snake_case_ ,snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(__UpperCAmelCase, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __magic_name__ ( ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = pa.BufferOutputStream()
snake_case_ = Features({'''labels''': ClassLabel(names=['''neg''', '''pos'''] )} )
with ArrowWriter(stream=__UpperCAmelCase, features=__UpperCAmelCase ) as writer:
writer.write({'''labels''': 0} )
writer.write({'''labels''': 1} )
snake_case_ ,snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
snake_case_ = pa.BufferReader(output.getvalue() )
snake_case_ = pa.ipc.open_stream(__UpperCAmelCase )
snake_case_ = f.read_all()
snake_case_ = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__UpperCAmelCase )
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 10] )
def __magic_name__ ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
snake_case_ = pa.BufferOutputStream()
with ArrowWriter(
stream=__UpperCAmelCase, writer_batch_size=__UpperCAmelCase, hash_salt='''split_name''', check_duplicates=__UpperCAmelCase, ) as writer:
with pytest.raises(__UpperCAmelCase ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1}, key=[1, 2] )
snake_case_ ,snake_case_ = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''', [None, 2, 10] )
def __magic_name__ ( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
snake_case_ = pa.BufferOutputStream()
with ArrowWriter(
stream=__UpperCAmelCase, writer_batch_size=__UpperCAmelCase, hash_salt='''split_name''', check_duplicates=__UpperCAmelCase, ) as writer:
with pytest.raises(__UpperCAmelCase ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1}, key=10 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2}, key=10 )
snake_case_ ,snake_case_ = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''', [None, 2, 10] )
def __magic_name__ ( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
snake_case_ = pa.BufferOutputStream()
with ArrowWriter(
stream=__UpperCAmelCase, writer_batch_size=__UpperCAmelCase, hash_salt='''split_name''', check_duplicates=__UpperCAmelCase, ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1}, key=1 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2}, key=2 )
snake_case_ ,snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''', [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
snake_case_ = pa.BufferOutputStream()
snake_case_ = pa.schema(__UpperCAmelCase ) if fields else None
with ArrowWriter(stream=__UpperCAmelCase, schema=__UpperCAmelCase, writer_batch_size=__UpperCAmelCase ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
writer.write_batch({'''col_1''': [], '''col_2''': []} )
snake_case_ ,snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(__UpperCAmelCase, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''', [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
snake_case_ = pa.BufferOutputStream()
snake_case_ = pa.schema(__UpperCAmelCase ) if fields else None
with ArrowWriter(stream=__UpperCAmelCase, schema=__UpperCAmelCase, writer_batch_size=__UpperCAmelCase ) as writer:
writer.write_table(pa.Table.from_pydict({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} ) )
snake_case_ ,snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(__UpperCAmelCase, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''', [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = pa.BufferOutputStream()
snake_case_ = pa.schema(__UpperCAmelCase ) if fields else None
with ArrowWriter(stream=__UpperCAmelCase, schema=__UpperCAmelCase, writer_batch_size=__UpperCAmelCase ) as writer:
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''foo'''], '''col_2''': [1]} ) )
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''bar'''], '''col_2''': [2]} ) )
snake_case_ ,snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(__UpperCAmelCase, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __magic_name__ ( ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
snake_case_ = os.path.join(__UpperCAmelCase, '''test.arrow''' )
with ArrowWriter(path=__UpperCAmelCase, schema=pa.schema(__UpperCAmelCase ) ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
snake_case_ ,snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__UpperCAmelCase, metadata=writer._schema.metadata )
_check_output(__UpperCAmelCase, 1 )
def __magic_name__ ( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if pa.types.is_list(__UpperCAmelCase ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if isinstance(lst[0], __UpperCAmelCase ):
change_first_primitive_element_in_list(lst[0], __UpperCAmelCase )
else:
snake_case_ = value
@pytest.mark.parametrize('''optimized_int_type, expected_dtype''', [(None, pa.intaa()), (Value('''int32''' ), pa.intaa())] )
@pytest.mark.parametrize('''sequence''', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
snake_case_ = pa.array(TypedSequence(__UpperCAmelCase, optimized_int_type=__UpperCAmelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'''col, expected_dtype''', [
('''attention_mask''', pa.inta()),
('''special_tokens_mask''', pa.inta()),
('''token_type_ids''', pa.inta()),
('''input_ids''', pa.intaa()),
('''other''', pa.intaa()),
], )
@pytest.mark.parametrize('''sequence''', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
snake_case_ = pa.array(OptimizedTypedSequence(__UpperCAmelCase, col=__UpperCAmelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
snake_case_ = copy.deepcopy(__UpperCAmelCase )
snake_case_ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__UpperCAmelCase, __UpperCAmelCase )
snake_case_ = pa.array(OptimizedTypedSequence(__UpperCAmelCase, col=__UpperCAmelCase ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('''raise_exception''', [False, True] )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Dict:
'''simple docstring'''
snake_case_ = str(tmp_path / '''dataset-train.arrow''' )
try:
with ArrowWriter(path=__UpperCAmelCase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs ) -> None:
    '''simple docstring'''
    path = '''mock://dataset-train.arrow'''
    with ArrowWriter(path=path, storage_options=mockfs.storage_options ) as writer:
        assert isinstance(writer._fs, type(mockfs ) )
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
        writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path )
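# Note: `mockfs` is a pytest fixture defined in the datasets test suite; it is
# assumed here to be an in-memory fsspec filesystem registered under "mock://".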
def test_parquet_writer_write() -> None:
    '''simple docstring'''
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output ) as writer:
        writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
        writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue() )
    pa_table = pq.read_table(stream )
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
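# Round-tripping through pq.read_table verifies that ParquetWriter produced a
# valid Parquet file, not merely a byte stream of plausible size.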
@require_pil
@pytest.mark.parametrize('''embed_local_files''', [False, True] )
def test_writer_embed_local_files(tmp_path, embed_local_files ) -> None:
    '''simple docstring'''
    import PIL.Image
    image_path = str(tmp_path / '''test_image_rgb.jpg''' )
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8 ) ).save(image_path, format='''png''' )
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({'''image''': Image()} ), embed_local_files=embed_local_files ) as writer:
        writer.write({'''image''': image_path} )
        writer.finalize()
    stream = pa.BufferReader(output.getvalue() )
    pa_table = pq.read_table(stream )
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out['''image'''][0]['''path'''], str )
        with open(image_path, '''rb''' ) as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable() -> None:
    '''simple docstring'''
    non_nullable_fields = pa.schema([pa.field('''col_1''', pa.string(), nullable=False )] )
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output ) as writer:
        writer._build_writer(inferred_schema=non_nullable_fields )
    assert writer._schema == pa.schema([pa.field('''col_1''', pa.string() )] )
| 56
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config(self , **kwargs ) -> dict:
        config = {
            '''num_train_timesteps''': 1000,
            '''variance_type''': '''fixed_small_log''',
            '''clip_sample''': True,
            '''clip_sample_range''': 1.0,
            '''prediction_type''': '''epsilon''',
        }
        config.update(**kwargs )
        return config
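    # Individual tests override single entries of this default config through
    # **kwargs, e.g. self.get_scheduler_config(variance_type="learned_range").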
    def test_timesteps(self ) -> None:
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_variance_type(self ) -> None:
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample(self ) -> None:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_clip_sample_range(self ) -> None:
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )
    def test_prediction_type(self ) -> None:
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices(self ) -> None:
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
    def test_variance_fixed_small_log(self ) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='''fixed_small_log''' )
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1e-5
    def test_variance_learned_range(self ) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='''learned_range''' )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487 , predicted_variance=predicted_variance ) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999 , predicted_variance=predicted_variance ) - -0.0010011 < 1e-5
    def test_full_loop(self ) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 252.2682495 ) < 1e-2
        assert abs(result_mean.item() - 0.3284743 ) < 1e-3
    def test_full_loop_skip_timesteps(self ) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.2044983 ) < 1e-2
        assert abs(result_mean.item() - 0.3362038 ) < 1e-3
def lowercase_ ( self : int ) -> Tuple:
pass
def lowercase_ ( self : Dict ) -> Union[str, Any]:
pass
| 314
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
"feature_extraction_whisper": ["WhisperFeatureExtractor"],
"processing_whisper": ["WhisperProcessor"],
"tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
"WhisperForAudioClassification",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
"TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWhisperForConditionalGeneration",
"TFWhisperModel",
"TFWhisperPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
"FlaxWhisperForConditionalGeneration",
"FlaxWhisperModel",
"FlaxWhisperPreTrainedModel",
"FlaxWhisperForAudioClassification",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
'''simple docstring'''
raise RuntimeError('''CUDA out of memory.''' )
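# `find_executable_batch_size` string-matches this message to treat the failure
# as an out-of-memory condition and retry with a smaller batch size.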
class ModelForTest(nn.Module ):
    """simple docstring"""
    def __init__(self ):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward(self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
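# A tiny Linear-BatchNorm-Linear stack, used only to allocate a measurable
# amount of CUDA memory in the release-memory test below.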
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_base_case(self ) -> None:
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
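    # Each simulated OOM halves the batch size, hence the expected
    # 128 -> 64 -> 32 -> 16 -> 8 sequence asserted above.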
    def test_args(self ) -> None:
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arg1 ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1
        bs, arg1 = mock_training_loop_function('''hello''' )
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
        self.assertListEqual([bs, arg1] , [8, '''hello'''] )
    def test_start_zero(self ) -> None:
        @find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(batch_size ):
            pass
        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
    def test_approach_zero(self ) -> None:
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass
        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
    def test_verbose_guard(self ) -> None:
        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arg1 , arg2 ):
            if batch_size != 8:
                raise_fake_out_of_memory()
        with self.assertRaises(TypeError ) as cm:
            mock_training_loop_function(128 , '''hello''' , '''world''' )
        self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
        self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')`''' , cm.exception.args[0] )
    def test_any_other_error(self ) -> None:
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            raise ValueError('''Oops, we had an error!''' )
        with self.assertRaises(ValueError ) as cm:
            mock_training_loop_function()
        self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
    def test_release_memory(self ) -> None:
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
| 314
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 58
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self ) -> None:
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
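    # AlignProcessor pairs a BERT tokenizer with an EfficientNet image processor,
    # so the fixtures above write both a WordPiece vocab and an image-processor JSON.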
    def get_tokenizer(self , **kwargs ) -> BertTokenizer:
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer(self , **kwargs ) -> BertTokenizerFast:
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor(self , **kwargs ) -> EfficientNetImageProcessor:
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown(self ) -> None:
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self ) -> list:
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self ) -> None:
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = AlignProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = AlignProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , BertTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , BertTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , EfficientNetImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , EfficientNetImageProcessor )
    def test_save_load_pretrained_additional_features(self ) -> None:
        processor = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = AlignProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , BertTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , EfficientNetImageProcessor )
    def test_image_processor(self ) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer(self ) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , padding='''max_length''' , max_length=64 )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor(self ) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode(self ) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names(self ) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 314
| 0
|
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = """sshleifer/mar_enro_6_3_student"""
class TestMbartCc25Enro(TestCasePlus ):
    def setUp(self ) -> None:
        '''simple docstring'''
        super().setUp()
        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz" , extract_compressed_file=True , )
        self.data_dir = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
@slow
@require_torch_gpu
    def test_model_download(self ) -> None:
        '''simple docstring'''
        MarianMTModel.from_pretrained(MARIAN_MODEL )
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self ) -> None:
        '''simple docstring'''
        env_vars_to_replace = {
"$MAX_LEN": 64,
"$BS": 64,
"$GAS": 1,
"$ENRO_DIR": self.data_dir,
"facebook/mbart-large-cc25": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"--learning_rate=3e-5": "--learning_rate 3e-4",
"--num_train_epochs 6": "--num_train_epochs 1",
}
# Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py" )[1].strip()
        bash_script = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
""".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys , "argv" , testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationModule.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            model = main(args )
# Check metrics
            metrics = load_json(model.metrics_save_path )
            first_step_stats = metrics["val"][0]
            last_step_stats = metrics["val"][-1]
            self.assertEqual(len(metrics["val"] ) , (args.max_epochs / args.val_check_interval) )
            assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , float )
self.assertGreater(last_step_stats["val_avg_gen_time"] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["val_avg_gen_time"] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["val_avg_bleu"] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
            contents = os.listdir(output_dir )
            ckpt_path = [x for x in contents if x.endswith(".ckpt" )][0]
            full_path = os.path.join(args.output_dir , ckpt_path )
            ckpt = torch.load(full_path , map_location="cpu" )
            expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
            assert expected_key in ckpt["state_dict"]
            assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
            if args.do_predict:
                contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus ):
    @timeout_decorator.timeout(600 )
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self ) -> None:
        '''simple docstring'''
        data_dir = f"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
"$BS": 16,
"$GAS": 1,
"$ENRO_DIR": data_dir,
"$m": "sshleifer/student_marian_en_ro_6_1",
"val_check_interval=0.25": "val_check_interval=1.0",
}
# Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py" )[1].strip()
        )
        bash_script = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
        bash_script = bash_script.replace("--fp16 " , " " )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16" , "" )
        epochs = 6
        testargs = (
["distillation.py"]
+ bash_script.split()
+ [
f"""--output_dir={output_dir}""",
"--gpus=1",
"--learning_rate=1e-3",
f"""--num_train_epochs={epochs}""",
"--warmup_steps=10",
"--val_check_interval=1.0",
"--do_predict",
]
)
        with patch.object(sys , "argv" , testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationDistiller.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args )
# Check metrics
            metrics = load_json(model.metrics_save_path )
            first_step_stats = metrics["val"][0]
            last_step_stats = metrics["val"][-1]
            assert len(metrics["val"] ) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
            assert last_step_stats["val_avg_gen_time"] >= 0.01
            assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # BLEU must improve, otherwise the model learned nothing
            assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
            assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , float )
# check lightning ckpt can be loaded and has a reasonable statedict
            contents = os.listdir(output_dir )
            ckpt_path = [x for x in contents if x.endswith(".ckpt" )][0]
            full_path = os.path.join(args.output_dir , ckpt_path )
            ckpt = torch.load(full_path , map_location="cpu" )
            expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
            assert expected_key in ckpt["state_dict"]
            assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
            if args.do_predict:
                contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
| 59
|
def reverse_words(input_str: str ) -> str:
    '''simple docstring'''
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314
| 0
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 60
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def _read32(bytestream ):
    '''simple docstring'''
    dt = numpy.dtype(numpy.uint32 ).newbyteorder('''>''' )
    return numpy.frombuffer(bytestream.read(4 ) , dtype=dt )[0]
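# MNIST file headers are stored as big-endian uint32 values, hence the ">"
# byte-order flag above.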
@deprecated(None , '''Please use tf.data to implement this functionality.''' )
def _extract_images(f ):
    '''simple docstring'''
    print('''Extracting''' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2051:
            raise ValueError(
                '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
        num_images = _read32(bytestream )
        rows = _read32(bytestream )
        cols = _read32(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf , dtype=numpy.uint8 )
        data = data.reshape(num_images , rows , cols , 1 )
        return data
@deprecated(None , '''Please use tf.one_hot on tensors.''' )
def _dense_to_one_hot(labels_dense , num_classes ):
    '''simple docstring'''
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
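# Example: labels [1, 3] with num_classes=4 become
# [[0, 1, 0, 0], [0, 0, 0, 1]].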
@deprecated(None , '''Please use tf.data to implement this functionality.''' )
def _extract_labels(f , one_hot=False , num_classes=10 ):
    '''simple docstring'''
    print('''Extracting''' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2049:
            raise ValueError(
                '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
        num_items = _read32(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf , dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels , num_classes )
        return labels
class _DataSet:
    """simple docstring"""
    @deprecated(
        None , '''Please use alternatives such as official/mnist/_DataSet.py'''
        ''' from tensorflow/models.''' , )
    def __init__(self , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ):
        seed1, seed2 = random_seed.get_seed(seed )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2 )
        dtype = dtypes.as_dtype(dtype ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32 )
                images = numpy.multiply(images , 1.0 / 255.0 )
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self ):
        return self._images
    @property
    def labels(self ):
        return self._labels
    @property
    def num_examples(self ):
        return self._num_examples
    @property
    def epochs_completed(self ):
        return self._epochs_completed
    def next_batch(self , batch_size , fake_data=False , shuffle=True ):
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size )],
                [fake_label for _ in range(batch_size )],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples )
            numpy.random.shuffle(perm0 )
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples )
                numpy.random.shuffle(perm )
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None , '''Please write your own downloading logic.''' )
def _maybe_download(filename , work_directory , source_url ):
    '''simple docstring'''
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory , filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url , filepath )  # noqa: S310
        with gfile.GFile(filepath ) as f:
            size = f.size()
        print('''Successfully downloaded''' , filename , size , '''bytes.''' )
    return filepath
@deprecated(
    None , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def read_data_sets(train_dir , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , validation_size=5000 , seed=None , source_url=DEFAULT_SOURCE_URL , ):
    '''simple docstring'''
    if fake_data:
        def fake():
            return _DataSet(
                [] , [] , fake_data=True , one_hot=one_hot , dtype=dtype , seed=seed )
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train , validation=validation , test=test )
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = '''train-images-idx3-ubyte.gz'''
    train_labels_file = '''train-labels-idx1-ubyte.gz'''
    test_images_file = '''t10k-images-idx3-ubyte.gz'''
    test_labels_file = '''t10k-labels-idx1-ubyte.gz'''
    local_file = _maybe_download(
        train_images_file , train_dir , source_url + train_images_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        train_images = _extract_images(f )
    local_file = _maybe_download(
        train_labels_file , train_dir , source_url + train_labels_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        train_labels = _extract_labels(f , one_hot=one_hot )
    local_file = _maybe_download(
        test_images_file , train_dir , source_url + test_images_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        test_images = _extract_images(f )
    local_file = _maybe_download(
        test_labels_file , train_dir , source_url + test_labels_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        test_labels = _extract_labels(f , one_hot=one_hot )
    if not 0 <= validation_size <= len(train_images ):
        msg = (
            '''Validation size should be between 0 and '''
            f'''{len(train_images )}. Received: {validation_size}.'''
        )
        raise ValueError(msg )
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
    train = _DataSet(train_images , train_labels , **options )
    validation = _DataSet(validation_images , validation_labels , **options )
    test = _DataSet(test_images , test_labels , **options )
    return _Datasets(train=train , validation=validation , test=test )
| 314
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """bert-generation"""
    def __init__( self , vocab_size=50358 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=2 , eos_token_id=1 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 61
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path ):
    '''simple docstring'''
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn , prediction , ground_truths ):
    '''simple docstring'''
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
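# Example: with exact_match_score, a prediction counts as correct if it matches
# any one of the gold answers for that question.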
def get_scores(args , preds_path , gold_data_path ):
    '''simple docstring'''
    hypos = [line.strip() for line in open(preds_path , '''r''' ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep='''\t''' , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , '''r''' ).readlines()]
        answers = [[reference] for reference in references]
    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        f1 += metric_max_over_ground_truths(f1_score , prediction , ground_truths )
    em = 100.0 * em / total
    f1 = 100.0 * f1 / total
    logger.info(F'''F1: {f1:.2f}''' )
    logger.info(F'''EM: {em:.2f}''' )
def get_precision_at_k(args , preds_path , gold_data_path ):
    '''simple docstring'''
    k = args.k
    hypos = [line.strip() for line in open(preds_path , '''r''' ).readlines()]
    references = [line.strip() for line in open(gold_data_path , '''r''' ).readlines()]
    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split('''\t''' )[:k] )
        ref_provenance = set(reference.split('''\t''' ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em = 100.0 * em / total
    logger.info(F'''Precision@{k}: {em: .2f}''' )
def evaluate_batch_retrieval(args , rag_model , questions ):
    '''simple docstring'''
    def strip_title(title ):
        if title.startswith('''"''' ):
            title = title[1:]
        if title.endswith('''"''' ):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors='''pt''' , padding=True , truncation=True , )['''input_ids'''].to(args.device )
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs['''title''']]
        provenance_strings.append('''\t'''.join(provenance ) )
    return provenance_strings
def evaluate_batch_e2e(args , rag_model , questions ):
    '''simple docstring'''
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors='''pt''' , padding=True , truncation=True )
        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )
        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info('''Q: {} - A: {}'''.format(q , a ) )
        return answers
def get_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=str , help=(
            '''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
            ''' model_name_or_path'''
        ) , )
    parser.add_argument(
        '''--index_name''' , default=None , choices=['''exact''', '''compressed''', '''legacy'''] , type=str , help='''RAG model retriever type''' , )
    parser.add_argument(
        '''--index_path''' , default=None , type=str , help='''Path to the retrieval index''' , )
    parser.add_argument('''--n_docs''' , default=5 , type=int , help='''Number of retrieved docs''' )
    parser.add_argument(
        '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=str , help=(
            '''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
            ''' precision@k.'''
        ) , )
    parser.add_argument('''--k''' , default=1 , type=int , help='''k for the precision@k calculation''' )
    parser.add_argument(
        '''--evaluation_set''' , default=None , type=str , required=True , help='''Path to a file containing evaluation samples''' , )
    parser.add_argument(
        '''--gold_data_path''' , default=None , type=str , required=True , help='''Path to a tab-separated file with gold samples''' , )
    parser.add_argument(
        '''--gold_data_mode''' , default='''qa''' , type=str , choices=['''qa''', '''ans'''] , help=(
            '''Format of the gold data file:'''
            ''' qa - a single line in the following format: question [tab] answer_list;'''
            ''' ans - a single line of the gold file contains the expected answer string'''
        ) , )
    parser.add_argument(
        '''--predictions_path''' , type=str , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
    parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
    parser.add_argument(
        '''--eval_batch_size''' , default=8 , type=int , help='''Batch size per GPU/CPU for evaluation.''' , )
    parser.add_argument(
        '''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
    parser.add_argument(
        '''--num_beams''' , default=4 , type=int , help='''Number of beams to be used when generating answers''' , )
    parser.add_argument('''--min_length''' , default=1 , type=int , help='''Min length of the generated answers''' )
    parser.add_argument('''--max_length''' , default=50 , type=int , help='''Max length of the generated answers''' )
    parser.add_argument(
        '''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
    parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
    args = parser.parse_args()
    args.device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    return args
def main(args ):
    '''simple docstring'''
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith('''rag''' ):
        model_class = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
        model_kwargs['''n_docs'''] = args.n_docs
        if args.index_name is not None:
            model_kwargs['''index_name'''] = args.index_name
        if args.index_path is not None:
            model_kwargs['''index_path'''] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info('''Evaluate the following checkpoints: %s''' , checkpoints )
    score_fn = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info('''***** Running evaluation for {} *****'''.format(checkpoint ) )
        logger.info('''  Batch size = %d''' , args.eval_batch_size )
        logger.info('''  Predictions will be stored under {}'''.format(args.predictions_path ) )
        if args.model_type.startswith('''rag''' ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write('''\n'''.join(answers ) + '''\n''' )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write('''\n'''.join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    args = get_args()
main(args)
| 314
| 0
|
def print_pascal_triangle(num_rows: int ) -> None:
    """Print Pascal's triangle for different number of rows."""
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=' ' )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] , end=' ' )
            else:
                print(triangle[row_idx][col_idx] , end='' )
        print()
def generate_pascal_triangle(num_rows: int ) -> list[list[int]]:
    if not isinstance(num_rows , int ):
        raise TypeError('The input value of \'num_rows\' should be \'int\'' )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0' )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows ):
        current_row = populate_current_row(triangle , current_row_idx )
        triangle.append(current_row )
    return triangle
def populate_current_row(triangle: list[list[int]] , current_row_idx: int ) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1 , current_row_idx ):
        calculate_current_element(
            triangle , current_row , current_row_idx , current_col_idx )
    return current_row
def calculate_current_element(triangle: list[list[int]] , current_row: list[int] , current_row_idx: int , current_col_idx: int , ) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
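# This implements the Pascal identity C(n, k) = C(n-1, k-1) + C(n-1, k).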
def generate_pascal_triangle_optimized(num_rows: int ) -> list[list[int]]:
    if not isinstance(num_rows , int ):
        raise TypeError('The input value of \'num_rows\' should be \'int\'' )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0' )
    result: list[list[int]] = [[1]]
    for row_index in range(1 , num_rows ):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length , 2 ) )
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row )
    return result
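# Each Pascal row is symmetric, so only the first ceil(row_length / 2) elements
# are computed; the rest of the row is mirrored from that first half.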
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = F'{func.__name__}({value})'
        timing = timeit(F'__main__.{call}' , setup='import __main__' )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(F'{call:38} -- {timing:.4f} seconds' )
    for value in range(15 ):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 62
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase ):
    """simple docstring"""
    def __init__(self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
def lowercase_ ( self : Tuple ) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=False ) -> Optional[int]:
if not batched:
SCREAMING_SNAKE_CASE__ = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image.size
else:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE__ = int(self.size['''shortest_edge'''] * h / w )
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
elif w > h:
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE__ = int(self.size['''shortest_edge'''] * w / h )
else:
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
else:
SCREAMING_SNAKE_CASE__ = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 314
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
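# Added note (a hedged sketch, not part of the original file): with the lazy
# module installed into sys.modules, the torch-backed class is only imported on
# first attribute access, e.g.:
#
#     from transformers.models.timm_backbone import TimmBackboneConfig
#     config = TimmBackboneConfig()  # triggers the lazy import machinery once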
| 63
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"
    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 314
| 0
|
"""simple docstring"""
def solution(n: int = 4_000_000) -> int:
    """
    Returns the sum of all even Fibonacci numbers that do not exceed n.
    """
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F'''{solution() = }''')
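# Added alternative sketch (not part of the original solution): every third
# Fibonacci number is even, so the even terms satisfy E(k) = 4*E(k-1) + E(k-2);
# iterating only over them gives the same sum without the parity test.
def solution_even_terms(n: int = 4_000_000) -> int:
    a, b = 2, 8  # the first two even Fibonacci numbers
    total = 0
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total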
| 64
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
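# Added sketch (not part of the original file): with the defaults above, the
# encoder depth is reported per module rather than as a single integer:
#
#     config = LxmertConfig()
#     config.num_hidden_layers  # {"vision": 5, "cross_encoder": 5, "language": 9}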
| 314
| 0
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """
    Contains the full pipeline for the SHA-1 hashing algorithm.
    """
    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
    @staticmethod
    def rotate(n, b):
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data
    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]
    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w
    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
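# Added self-check sketch (not part of the original module): the digest of b"abc"
# is the standard SHA-1 test vector, so the class above can be validated offline:
#
#     >>> SHA1Hash(b"abc").final_hash()
#     'a9993e364706816aba3e25717850c26c9cd0d89d'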
| 65
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : str = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
_SCREAMING_SNAKE_CASE : Dict = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
_SCREAMING_SNAKE_CASE : Optional[int] = {
'''vinai/phobert-base''': 256,
'''vinai/phobert-large''': 256,
}
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = set()
SCREAMING_SNAKE_CASE__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ = char
SCREAMING_SNAKE_CASE__ = set(_A )
return pairs
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : List[str]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , **__lowerCamelCase : Optional[int] , ) -> Union[str, Any]:
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = vocab_file
SCREAMING_SNAKE_CASE__ = merges_file
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 3
self.add_from_file(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.encoder.items()}
with open(__lowerCamelCase , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE__ = merges_handle.read().split('''\n''' )[:-1]
SCREAMING_SNAKE_CASE__ = [tuple(merge.split()[:-1] ) for merge in merges]
SCREAMING_SNAKE_CASE__ = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
        self.cache = {}
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
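# Added usage sketch (hedged; not part of the original file). PhoBERT expects
# word-segmented Vietnamese input; "vinai/phobert-base" is the checkpoint the
# maps above point at, and PhobertTokenizer is the upstream name of this class:
#
#     tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
#     tokens = tokenizer.tokenize("Tôi là sinh_viên")
#     ids = tokenizer.convert_tokens_to_ids(tokens)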
| 314
| 0
|
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object
    def Body(*args, **kwargs):
        pass
    _serve_dependencies_installed = False
logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information"""
    infos: dict
class ServeTokenizeResult(BaseModel):
    """Tokenize result model"""
    tokens: List[str]
    tokens_ids: Optional[List[int]]
class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model"""
    text: str
class ServeForwardResult(BaseModel):
    """Forward result model"""
    output: Any
class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints." )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on", )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)", )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately." )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"], ),
                    APIRoute(
                        "/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"], ),
                    APIRoute(
                        "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"], ),
                    APIRoute(
                        "/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"], ),
                ], timeout=600, )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)
    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
    def detokenize(
        self, tokens_ids: List[int] = Body(None, embed=True), skip_special_tokens: bool = Body(False, embed=True), cleanup_tokenization_spaces: bool = Body(True, embed=True), ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
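# Added note (hedged; not part of the original file): an illustrative invocation
# of the command registered above; the model id is a placeholder:
#
#     transformers-cli serve --task sentiment-analysis --model <model-id> --port 8888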
| 66
|
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """
    Returns the greatest product of thirteen adjacent digits in the 1000-digit number n.
    """
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F"{solution() = }")
| 314
| 0
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        # One constraint may not be a complete subset of another; that would make the
        # disjunction ambiguous, so the constructor rejects it.
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])
        dc.reset()
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 67
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
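# Added usage sketch (hedged; not part of the original file). "google/ddpm-cifar10-32"
# is an assumed checkpoint id used only for illustration:
#
#     from diffusers import DDIMPipeline
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]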
| 314
| 0
|
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
A__ = []
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
A__ = image_processing(image_inputs[0] , maps[0] , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched
A__ = image_processing(lowercase , lowercase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test not batched input (PIL images)
A__ , A__ = prepare_semantic_single_inputs()
A__ = image_processing(lowercase , lowercase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched input (PIL images)
A__ , A__ = prepare_semantic_batch_inputs()
A__ = image_processing(lowercase , lowercase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
A__ , A__ = prepare_semantic_single_inputs()
A__ = image_processing(lowercase , lowercase , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 150 )
A__ = True
A__ = image_processing(lowercase , lowercase , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
| 68
|
from ...configuration_utils import PretrainedConfig
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "tapas"
    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels , dict ):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
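

# --- Usage sketch (added): a minimal, hedged example of the config above.
# `TapasConfig` is the upstream transformers name for this class; the WTQ-style
# values below are illustrative assumptions, not tied to a released checkpoint.
# Run from a separate script, since this module uses relative imports.
if __name__ == "__main__":
    from transformers import TapasConfig

    config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
    print(config.num_aggregation_labels, config.average_approximation_function)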
| 314
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = LxmertForPreTraining(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
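

# --- Example invocation (added): the script name and all paths below are
# illustrative placeholders, not verified checkpoint locations. ---
# python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./lxmert_tf/model.ckpt \
#     --config_file ./lxmert_tf/config.json \
#     --pytorch_dump_path ./lxmert_pytorch/pytorch_model.bin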
| 69
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Optional[int] ) -> Tuple:
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
@slow
def lowercase_ ( self : List[str] ) -> Any:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            self.assertIsNotNone(tokenizer )
            self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(tokenizer ) , 0 )

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            self.assertIsNotNone(tokenizer )
            self.assertIsInstance(tokenizer , (GPTaTokenizer, GPTaTokenizerFast) )
            self.assertGreater(len(tokenizer ) , 0 )
def lowercase_ ( self : List[str] ) -> int:
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : List[str] ) -> Dict:
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(tokenizer , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 20 )
def lowercase_ ( self : Dict ) -> Any:
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER )
        self.assertIsInstance(config , RobertaConfig )
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER , config=config )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : Tuple ) -> Dict:
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(tmp_dir , '''vocab.txt''' ) )

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''bert''' , use_fast=False )
            self.assertIsInstance(tokenizer , BertTokenizer )

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(tmp_dir , '''vocab.json''' ) )
            shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(tmp_dir , '''merges.txt''' ) )

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''gpt2''' , use_fast=False )
            self.assertIsInstance(tokenizer , GPTaTokenizer )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Optional[int]:
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(tmp_dir , '''vocab.txt''' ) )

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''bert''' )
            self.assertIsInstance(tokenizer , BertTokenizerFast )

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(tmp_dir , '''vocab.json''' ) )
            shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(tmp_dir , '''merges.txt''' ) )

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''gpt2''' )
            self.assertIsInstance(tokenizer , GPTaTokenizerFast )
def lowercase_ ( self : Union[str, Any] ) -> int:
        with pytest.raises(ValueError ):
            AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Tuple:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
            self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )

            if isinstance(tokenizer , BertTokenizer ):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , False )
            else:
                self.assertEqual(tokenizer.do_lower_case , False )
            self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowercase_ ( self : Any ) -> str:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
                tokenizer = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowercase_ ( self : List[str] ) -> Tuple:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
            tokenizer_class_from_name(tokenizer_name )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Any:
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=False ) , BertTokenizer )
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , BertTokenizerFast )
@require_tokenizers
def lowercase_ ( self : List[Any] ) -> Optional[Any]:
        tokenizer = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=False )
        sample = '''Hello, world. How are you?'''
        tokens = tokenizer.tokenize(sample )
        self.assertEqual('''[UNK]''' , tokens[0] )

        tokenizer = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=False )
        tokens = tokenizer.tokenize(sample )
        self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowercase_ ( self : Dict ) -> int:
        tokenizer = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
        self.assertEqual(type(tokenizer ) , PreTrainedTokenizerFast )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir )
            tokenizera = AutoTokenizer.from_pretrained(tmp_dir )

        self.assertIsInstance(tokenizera , tokenizer.__class__ )
        self.assertEqual(tokenizera.vocab_size , 12 )
def lowercase_ ( self : Optional[int] ) -> Any:
        tokenizer = AutoTokenizer.from_pretrained('''ctrl''' )
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer , CTRLTokenizer )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
# Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config('''bert-base-cased''' )
        config.pop('''_commit_hash''' , None )
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config , {'''do_lower_case''': False} )

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER )
        self.assertDictEqual(config , {} )

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir )
            config = get_tokenizer_config(tmp_dir )

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowercase_ ( self : int ) -> str:
try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=CustomTokenizer )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoTokenizer.register(BertConfig , slow_tokenizer_class=BertTokenizer )

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir )

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir )
                self.assertIsInstance(new_tokenizer , CustomTokenizer )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase_ ( self : List[Any] ) -> List[Any]:
try:
            AutoConfig.register('''custom''' , CustomConfig )
            # Can register in two steps
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=CustomTokenizer )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
            AutoTokenizer.register(CustomConfig , fast_tokenizer_class=CustomTokenizerFast )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig , slow_tokenizer_class=CustomTokenizer , fast_tokenizer_class=CustomTokenizerFast )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoTokenizer.register(BertConfig , fast_tokenizer_class=BertTokenizerFast )

            # We pass through a bert tokenizer fast because there is no converter from slow to fast for our new
            # tokenizer and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER )
                bert_tokenizer.save_pretrained(tmp_dir )
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir )

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir )

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir )
                self.assertIsInstance(new_tokenizer , CustomTokenizerFast )

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , use_fast=False )
                self.assertIsInstance(new_tokenizer , CustomTokenizer )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Dict ) -> Tuple:
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=False )

        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=True )
        self.assertTrue(tokenizer.special_attribute_present )
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir )
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , trust_remote_code=True )
        self.assertTrue(reloaded_tokenizer.special_attribute_present )

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=True , use_fast=False )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir )
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , trust_remote_code=True , use_fast=False )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertTrue(reloaded_tokenizer.special_attribute_present )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowercase_ ( self : List[str] ) -> str:
        class NewTokenizer(BertTokenizer ):
            """simple docstring"""

            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast ):
            """simple docstring"""

            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=NewTokenizer )
            AutoTokenizer.register(CustomConfig , fast_tokenizer_class=NewTokenizerFast )
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertFalse(tokenizer.special_attribute_present )
            tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=False )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertFalse(tokenizer.special_attribute_present )

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=False )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertFalse(tokenizer.special_attribute_present )
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=False , use_fast=False )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertFalse(tokenizer.special_attribute_present )

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=True )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertTrue(tokenizer.special_attribute_present )
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=True , use_fast=False )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Dict ) -> List[str]:
        tokenizer = AutoTokenizer.from_pretrained(
            '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=True )
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=True , use_fast=False )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowercase_ ( self : Union[str, Any] ) -> Dict:
        with self.assertRaisesRegex(
            EnvironmentError , '''bert-base is not a local folder and is not a valid model identifier''' ):
            tokenizer = AutoTokenizer.from_pretrained('''bert-base''' )
def lowercase_ ( self : Dict ) -> Optional[int]:
        with self.assertRaisesRegex(
            EnvironmentError , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='''aaaaaa''' )
def lowercase_ ( self : Any ) -> Optional[Any]:
# Make sure we have cached the tokenizer.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
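

# --- Usage sketch (added): the behaviours the tests above exercise, condensed.
# Model names are illustrative and assume network access to the Hugging Face Hub.
if __name__ == "__main__":
    fast_tok = AutoTokenizer.from_pretrained('''bert-base-cased''' )  # fast tokenizer by default
    slow_tok = AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=False )
    print(type(fast_tok ).__name__ , type(slow_tok ).__name__ )
    print(get_tokenizer_config('''bert-base-cased''' ).get('''do_lower_case''' ) )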
| 314
| 0
|
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
MODEL = '''base_with_context'''
def load_notes_encoder( weights , model ):
    """simple docstring"""
    _lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
    _lowerCAmelCase = nn.Parameter(
        torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=False )
    for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
        _lowerCAmelCase = nn.Parameter(
            torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
        attention_weights = ly_weight["""attention"""]
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_continuous_encoder( weights , model ):
    """simple docstring"""
    _lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
    _lowerCAmelCase = nn.Parameter(
        torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=False )
    for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["""attention"""]
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_decoder( weights , model ):
    """simple docstring"""
    _lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
    _lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
    _lowerCAmelCase = nn.Parameter(
        torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=False )
    _lowerCAmelCase = nn.Parameter(
        torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
    for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
        _lowerCAmelCase = nn.Parameter(
            torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
        _lowerCAmelCase = nn.Parameter(
            torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
        attention_weights = ly_weight["""self_attention"""]
        _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
        _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
        _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
        _lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
        attention_weights = ly_weight["""MultiHeadDotProductAttention_0"""]
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
_lowerCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
_lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def main( args ):
    """simple docstring"""
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
        """from __gin__ import dynamic_registration""",
        """from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
        """diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
        """diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
    ]
    gin_file = os.path.join(args.checkpoint_path , """..""" , """config.gin""" )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" , variance_type="""fixed_large""" )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["""inputs"""] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["""targets_context"""] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["""targets_context"""] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint["""target"""]["""decoder"""] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
    args = parser.parse_args()
main(args)
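
# --- Example invocation (added): the script name and checkpoint path are
# illustrative placeholders, not verified locations. ---
# python convert_music_spectrogram_to_diffusers.py \
#     --checkpoint_path ./base_with_context/checkpoint_500000 \
#     --output_path ./spectrogram_diffusion_pipeline \
#     --save True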
| 70
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ) -> Dict:
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

        image_processor_map = {
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ) -> Union[str, Any]:
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ) -> int:
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ) -> Optional[int]:
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ) -> Tuple:
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : Optional[int] ) -> Optional[Any]:
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
def lowercase_ ( self : Any ) -> int:
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
def lowercase_ ( self : List[Any] ) -> List[str]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> Optional[int]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )

        input_str = '''lower newer'''

        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> List[Any]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )

        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str , images=image_input )

        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )

        # test if it raises when no input is passed
        with self.assertRaises(ValueError ):
            processor()
def lowercase_ ( self : Union[str, Any] ) -> int:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )

        self.assertListEqual(decoded_tok , decoded_processor )
def lowercase_ ( self : Union[str, Any] ) -> str:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )

        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str , images=image_input )

        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
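

# --- Usage sketch (added): the processor pattern tested above, outside unittest.
# The checkpoint name is an illustrative assumption and needs Hub access and Pillow.
if __name__ == "__main__":
    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor

    processor = VisionTextDualEncoderProcessor(
        tokenizer=BertTokenizerFast.from_pretrained('''bert-base-uncased''' ),
        image_processor=ViTImageProcessor(),
    )
    image = Image.fromarray(np.zeros((30, 400, 3) , dtype='''uint8''' ) )
    batch = processor(text='''lower newer''' , images=image , return_tensors='''np''' )
    print(sorted(batch.keys() ) )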
| 314
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ :Dict = logging.get_logger(__name__)
A_ :Union[str, Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __A ( PretrainedConfig ):
    """simple docstring"""

    model_type = """megatron-bert"""
    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
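

# --- Usage sketch (added): `MegatronBertConfig` is the upstream transformers name
# of the class above; the tiny sizes are illustrative, not a released configuration.
# Run from a separate script, since this module uses relative imports.
if __name__ == "__main__":
    from transformers import MegatronBertConfig

    cfg = MegatronBertConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
    print(cfg.hidden_size, cfg.position_embedding_type)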
| 71
|
from ...configuration_utils import PretrainedConfig

NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class UpperCAmelCase__ ( PretrainedConfig ):
    """simple docstring"""

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"
    def __init__(
        self,
        vocab_size=2_1128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
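

# --- Usage sketch (added): `NezhaConfig` is the upstream transformers name of the
# class above; the overridden value is illustrative. Run from a separate script,
# since this module uses relative imports.
if __name__ == "__main__":
    from transformers import NezhaConfig

    cfg = NezhaConfig(max_relative_position=32)
    print(cfg.max_relative_position, cfg.use_cache)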
| 314
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase__ = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
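
# --- Behaviour sketch (added): the `_LazyModule` above defers the heavy torch/TF
# imports until an attribute is first touched; a rough, hedged illustration of the
# effect, meant to be run from a separate script with `transformers` installed.
if __name__ == "__main__":
    import importlib

    tapas = importlib.import_module('''transformers.models.tapas''')
    print(tapas.TapasConfig.model_type)  # modeling code is only resolved on demand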
| 72
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE : Dict = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
_SCREAMING_SNAKE_CASE : List[str] = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
_SCREAMING_SNAKE_CASE : List[str] = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class UpperCAmelCase__ ( PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            pre_tok_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or pre_tok_state.get('''strip_accents''' , strip_accents ) != strip_accents
        ):
            pre_tok_class = getattr(normalizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''lowercase'''] = do_lower_case
            pre_tok_state['''strip_accents'''] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state )

        self.do_lower_case = do_lower_case
    def __getstate__( self ) -> Any:
        state = self.__dict__.copy()
        state['''_tokenizer'''].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        vocab = self.__dict__['''_tokenizer'''].get_vocab()
        self.__dict__['''_tokenizer'''].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
    def lowercase_( self , token_ids_0 , token_ids_1=None ) -> List[Any]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def lowercase_( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def lowercase_( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def lowercase_( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
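

# --- Usage sketch (added): assumes Hub access and the `rjieba` dependency used by
# the Jieba pre-tokenizer; the checkpoint is one of those listed in the maps above.
# Run from a separate script, since this module uses relative imports.
if __name__ == "__main__":
    from transformers import RoFormerTokenizerFast

    tok = RoFormerTokenizerFast.from_pretrained('''junnyu/roformer_chinese_base''')
    print(tok.tokenize('''今天天气非常好。'''))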
| 314
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a =logging.get_logger(__name__)
a ={
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class A_ ( PretrainedConfig ):
    model_type = '''realm'''
    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1E-3,
        reader_beam_size=5,
        reader_seq_len=320,  # 288 + 32
        num_block_records=1335_3718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
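
# --- Usage sketch (added): `RealmConfig` is the upstream transformers name of the
# class above; the overridden beam size is illustrative. Run from a separate
# script, since this module uses relative imports.
if __name__ == "__main__":
    from transformers import RealmConfig

    cfg = RealmConfig(reader_beam_size=3)
    print(cfg.reader_beam_size, cfg.num_block_records)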
| 73
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCAmelCase__ ( PretrainedConfig ):
    """simple docstring"""

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
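

# --- Usage sketch (added): `TrajectoryTransformerConfig` is the upstream
# transformers name of the class above; the dims shown are its HalfCheetah-style
# defaults, repeated explicitly. Run from a separate script, since this module
# uses relative imports.
if __name__ == "__main__":
    from transformers import TrajectoryTransformerConfig

    cfg = TrajectoryTransformerConfig(action_dim=6, observation_dim=17)
    print(cfg.transition_dim, cfg.n_embd)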
| 314
| 0
|
"""simple docstring"""
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = '''sshleifer/student_marian_en_ro_6_1'''
MBART_TINY = '''sshleifer/tiny-mbart'''
@require_torch
class lowerCAmelCase_ ( TestCasePlus ):
'''simple docstring'''
    def run_seqaseq_quick(
        self ,distributed=False ,extra_args_str=None ,predict_with_generate=True ,do_train=True ,do_eval=True ,do_predict=True ,) -> Tuple:
        output_dir = self.run_trainer(
            eval_steps=1 ,max_len=12 ,model_name=MARIAN_MODEL ,num_train_epochs=1 ,distributed=distributed ,extra_args_str=extra_args_str ,predict_with_generate=predict_with_generate ,do_train=do_train ,do_eval=do_eval ,do_predict=do_predict ,)
        logs = TrainerState.load_from_json(os.path.join(output_dir ,'trainer_state.json' ) ).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if 'eval_loss' in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats['eval_bleu'] ,float )
            assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
        self.run_seqaseq_quick(distributed=False )
@require_torch_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
        self.run_seqaseq_quick(distributed=True )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
        self.run_seqaseq_quick(distributed=True ,extra_args_str='--sharded_ddp simple' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
        self.run_seqaseq_quick(distributed=True ,extra_args_str='--sharded_ddp simple --fp16' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
        self.run_seqaseq_quick(distributed=True ,extra_args_str='--sharded_ddp zero_dp_2' ,predict_with_generate=False )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
self.run_seqaseq_quick(
distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2 --fp16' ,predict_with_generate=A_ )
@require_apex
@require_torch_gpu
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' )
    @parameterized.expand(['base', 'low', 'high', 'mixed'])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            'base': {'extra_args_str': '', 'n_matches': 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
        }

        data = experiments[experiment_id]
        kwargs = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
        log_info_string = 'Running training'
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data['extra_args_str'])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data['n_matches'])
    @slow
    def test_run_seqaseq(self):
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False)

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, 'trainer_state.json')).log_history
        eval_metrics = [log for log in logs if 'eval_loss' in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['eval_bleu'], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seqaseq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = '--skip_memory_metrics 0'

            output_dir = self.run_trainer(
                max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=1, optim=optim, distributed=True, extra_args_str=extra_args, do_eval=False, do_predict=False, n_gpus_to_use=1, )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, 'trainer_state.json')).log_history
            gpu_peak_mem_mb = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20)
            loss = logs[0]['train_loss']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
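        # A minimal sanity check of the arithmetic above (illustrative only, not measured):
        # 25e6 quantized params * (8 - 2) bytes saved per param = 150e6 bytes ~= 143MiB,
        # which is where the ~150MB ballpark and the 120MB threshold (with margin) come from.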
        expected_savings = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")

        self.assertGreater(
            gpu_alloc_mem_diff, expected_savings,
            'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
            F' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'
            F' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB', )

        self.assertGreater(
            gpu_total_mem_diff, expected_savings,
            'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
            F' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'
            F' gpu_total_mem_bnb={gpu_total_mem_bnb}MB', )

        self.assertEqual(
            loss_orig, loss_bnb, F'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}')
    def run_trainer(self, max_len: int, model_name: str, num_train_epochs: int, learning_rate: float = 3e-3, optim: str = "adafactor", distributed: bool = False, extra_args_str: str = None, eval_steps: int = 0, predict_with_generate: bool = True, do_train: bool = True, do_eval: bool = True, do_predict: bool = True, n_gpus_to_use: int = None, ):
        data_dir = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = F'\n            --model_name_or_path {model_name}\n            --train_file {data_dir}/train.json\n            --validation_file {data_dir}/val.json\n            --test_file {data_dir}/test.json\n            --output_dir {output_dir}\n            --overwrite_output_dir\n            --max_train_samples 8\n            --max_source_length {max_len}\n            --max_target_length {max_len}\n            --do_train\n            --num_train_epochs {str(num_train_epochs)}\n            --per_device_train_batch_size 4\n            --learning_rate {learning_rate}\n            --warmup_steps 8\n            --logging_steps 0\n            --logging_strategy no\n            --save_steps {str(eval_steps)}\n            --group_by_length\n            --label_smoothing_factor 0.1\n            --target_lang ro_RO\n            --source_lang en_XX\n        '.split()

        args_eval = F'\n            --do_eval\n            --per_device_eval_batch_size 4\n            --max_eval_samples 8\n            --val_max_target_length {max_len}\n            --evaluation_strategy steps\n            --eval_steps {str(eval_steps)}\n        '.split()

        args_predict = '\n            --do_predict\n        '.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += F'--optim {optim}'.split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = F'\n                -m torch.distributed.run\n                --nproc_per_node={n_gpus_to_use}\n                --master_port={master_port}\n                {self.examples_dir_str}/pytorch/translation/run_translation.py\n            '.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ['run_translation.py'] + args
            with patch.object(sys, 'argv', testargs):
                main()

        return output_dir
def solution(limit=1_00_00_00):
    '''
    Returns the number of reduced proper fractions with denominator <= limit,
    i.e. the sum of Euler's totient phi(d) for 2 <= d <= limit.
    '''
    # sieve of Eratosthenes restricted to odd numbers, plus 2
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    # Euler's product formula: phi(n) = n * prod(1 - 1/p) over the primes p dividing n
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"{solution() = }")
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    """simple docstring"""
    filtered_lines = []
    for line in lines:
        line = re.sub(r'''#.*''', '''''', line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = '''\n'''.join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode('''utf-8''')
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
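# For example, after this loop _MODULE_TO_EXTENSIONS["csv"] == [".csv", ".tsv"],
# since both extensions map to the "csv" module in _EXTENSION_TO_MODULE above.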
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
import numpy as np
from PIL import Image
def maxpooling(arr, size, stride):
    '''
    Performs max pooling on a square 2D matrix (image) with a (size x size)
    window shifted by `stride` pixels; returns the pooled numpy array.
    '''
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
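# Illustrative usage (values follow directly from the sliding-window definition above):
#   maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
#   returns [[ 6.,  8.], [14., 16.]] -- a (4 - 2) // 2 + 1 = 2x2 output.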
def avgpooling(arr, size, stride):
    '''
    Performs average pooling on a square 2D matrix (image) with a (size x size)
    window shifted by `stride` pixels; returns the pooled numpy array.
    '''
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
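# Illustrative usage: with the same 4x4 input, size=2 and stride=2,
#   avgpooling(...) returns [[ 3.,  5.], [11., 13.]] (window averages truncated by int()).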
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        """simple docstring"""
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        return model

    @property
    def dummy_vae(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        """simple docstring"""
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(text_encoder_config)

    @property
    def dummy_extractor(self):
        """simple docstring"""

        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.0_0085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        """simple docstring"""
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None)
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        """simple docstring"""
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        """simple docstring"""
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 40_0366_0346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        """simple docstring"""
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 27_3497_1755
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        """simple docstring"""
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 10_4435_5234
        guidance_scale = 12

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    '''
    returns the list of all the ways `target` can be constructed by
    concatenating words from `word_bank` (words may be reused)
    '''
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
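# Worked example: all_construct("abc", ["a", "b", "c", "ab"]) returns
# [["ab", "c"], ["a", "b", "c"]] -- every decomposition of "abc" over the word bank.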
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    '''simple docstring'''
    url = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
    soup = BeautifulSoup(requests.get(url).text, '''html.parser''')
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''', class_=class_).find('''span''').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class WhisperConfig(PretrainedConfig):
    """simple docstring"""

    model_type = """whisper"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__(self, vocab_size=5_18_65, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=15_36, encoder_ffn_dim=15_36, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=5_02_57, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=2_56, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=15_00, max_target_positions=4_48, pad_token_id=5_02_56, bos_token_id=5_02_56, eos_token_id=5_02_56, suppress_tokens=None, begin_suppress_tokens=[2_20, 5_02_56], use_weighted_layer_sum=False, classifier_proj_size=2_56, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs, ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs, )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ] )
        if self.use_past:
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')

        return common_inputs

    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, sampling_rate: int = 2_20_50, time_duration: float = 5.0, frequency: int = 2_20, ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency, )

        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)

        dummy_inputs['input_features'] = encoder_inputs.pop('input_features')
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids')

        if "past_key_values" in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values')

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1E-3
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    """simple docstring"""

    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            '''num_train_timesteps''': 1000,
            '''variance_type''': '''fixed_small_log''',
            '''clip_sample''': True,
            '''clip_sample_range''': 1.0,
            '''prediction_type''': '''epsilon''',
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='''fixed_small_log''')
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.00_00e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='''learned_range''')
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    """simple docstring"""
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''AutoImageProcessor'''
    tokenizer_class = '''AutoTokenizer'''

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        '''simple docstring'''
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        '''simple docstring'''
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(R"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(Rf'''</s_{key}>''', tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''', tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(R"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token):].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    '''simple docstring'''
    raise RuntimeError('''CUDA out of memory.''')


class ModelForTest(nn.Module):
    """simple docstring"""

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    """simple docstring"""

    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
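        # find_executable_batch_size halves the batch size after each simulated OOM,
        # so the search visits 128 -> 64 -> 32 -> 16 -> 8 before succeeding.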
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arga):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga

        bs, arga = mock_training_loop_function('''hello''')
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arga], [8, '''hello'''])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''', cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''', cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, '''hello''', '''world''')
        self.assertIn('''Batch size was passed into `f`''', cm.exception.args[0])
        self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''', cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError('''Oops, we had an error!''')

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn('''Oops, we had an error!''', cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.4814_5466, 0.457_8275, 0.4082_1073]
        self.image_std = [0.2686_2954, 0.2613_0258, 0.2757_7711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(2_24)
        self.center_crop = torchvision.transforms.CenterCrop(2_24)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding


class VQGAN_CLIP(nn.Module):
    # NOTE: the names of the trailing boolean flags are reconstructed; they are not used in this snippet.
    def __init__(self, iterations=10, lr=0.01, vqgan=None, vqgan_config=None, vqgan_checkpoint=None, clip=None, device=None, log=False, make_grid=True, return_val="image", quantize=True, save_intermediate=False, show_intermediate=False, save_vector=False, ):
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f'''gif saved to {output_path}''')

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=2_56).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z

    def _add_vector(self, transform_vector):
        """Add a vector to the latent and decode it back to an image."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
def __a ( self , a , a , a ):
UpperCamelCase__ = torch.randn_like(self.latent , requires_grad=a , device=self.device )
UpperCamelCase__ = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
UpperCamelCase__ = self._add_vector(a )
UpperCamelCase__ = loop_post_process(a )
UpperCamelCase__ = self._get_CLIP_loss(a , a , a )
print("CLIP loss" , a )
if self.log:
wandb.log({"CLIP Loss": clip_loss} )
clip_loss.backward(retain_graph=a )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def __a ( self , a , a , a ):
wandb.init(reinit=a , project="face-editor" )
wandb.config.update({"Positive Prompts": positive_prompts} )
wandb.config.update({"Negative Prompts": negative_prompts} )
wandb.config.update({"lr": self.lr, "iterations": self.iterations} )
if image_path:
UpperCamelCase__ = Image.open(a )
UpperCamelCase__ = image.resize((2_56, 2_56) )
wandb.log("Original Image" , wandb.Image(a ) )
def __a ( self , a ):
if not prompts:
return []
UpperCamelCase__ = []
UpperCamelCase__ = []
if isinstance(a , a ):
UpperCamelCase__ = [prompt.strip() for prompt in prompts.split("|" )]
for prompt in prompts:
if isinstance(a , (tuple, list) ):
UpperCamelCase__ = prompt[0]
UpperCamelCase__ = float(prompt[1] )
elif ":" in prompt:
UpperCamelCase__ , UpperCamelCase__ = prompt.split(":" )
UpperCamelCase__ = float(a )
else:
UpperCamelCase__ = prompt
UpperCamelCase__ = 1.0
processed_prompts.append(a )
weights.append(a )
return {
"prompts": processed_prompts,
"weights": torch.tensor(a , device=self.device ),
}
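    # End-to-end edit: encode the input image to a latent (or sample a random one),
    # optimize a transform vector against the CLIP prompts, then decode, show and
    # save the resulting frames.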
def __a ( self , a , a=None , a=None , a=True , a=False , a=True , a=True , a=None , ):
if image_path:
UpperCamelCase__ = self._get_latent(a )
else:
UpperCamelCase__ = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(a , a , a )
assert pos_prompts, "You must provide at least one positive prompt."
UpperCamelCase__ = self.process_prompts(a )
UpperCamelCase__ = self.process_prompts(a )
if save_final and save_path is None:
UpperCamelCase__ = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"] ) )
if not os.path.exists(a ):
os.makedirs(a )
else:
UpperCamelCase__ = save_path + "_" + get_timestamp()
os.makedirs(a )
UpperCamelCase__ = save_path
UpperCamelCase__ = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("Original Image" )
show_pil(custom_to_pil(a ) )
UpperCamelCase__ = loop_post_process(a )
for iter, transformed_img in enumerate(self._optimize_CLIP(a , a , a ) ):
if show_intermediate:
show_pil(a )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({"Image": wandb.Image(a )} )
if show_final:
show_pil(a )
if save_final:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png''' ) )
| 80
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : List[str] , **__lowerCamelCase : Dict ) -> List[str]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Any , **__lowerCamelCase : List[str] ) -> Any:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Optional[int] , **__lowerCamelCase : int ) -> Dict:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Dict ) -> Dict:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def lowercase_ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def lowercase_ ( self : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 314
| 0
|
"""simple docstring"""
def _A ( lowercase , lowercase ):
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowercase , int(b / 2 ) ) * actual_power(lowercase , int(b / 2 ) )
else:
return a * actual_power(lowercase , int(b / 2 ) ) * actual_power(lowercase , int(b / 2 ) )
def _A ( lowercase , lowercase ):
"""simple docstring"""
if b < 0:
return 1 / actual_power(lowercase , lowercase )
return actual_power(lowercase , lowercase )
if __name__ == "__main__":
print(power(-2, -3))
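# Worked example (exponentiation by squaring, O(log |b|) multiplications):
# power(-2, -3) first takes the reciprocal branch, 1 / actual_power(-2, -3).
# Because int(b / 2) truncates toward zero, the recursion on the negative
# exponent still terminates and yields (-2) ** 3 = -8, so the result is
# 1 / -8 = -0.125, which matches (-2) ** (-3).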
| 81
|
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314
| 0
|
import cva
import numpy as np
class __lowerCAmelCase :
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
if k in (0.04, 0.06):
_lowerCAmelCase = k
_lowerCAmelCase = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self ):
"""simple docstring"""
return str(self.k )
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = cva.imread(_snake_case , 0 )
_lowerCAmelCase , _lowerCAmelCase = img.shape
_lowerCAmelCase = []
_lowerCAmelCase = img.copy()
_lowerCAmelCase = cva.cvtColor(_snake_case , cva.COLOR_GRAY2RGB )
_lowerCAmelCase , _lowerCAmelCase = np.gradient(_snake_case )
_lowerCAmelCase = dx**2
_lowerCAmelCase = dy**2
_lowerCAmelCase = dx * dy
        _lowerCAmelCase = self.k  # use the k validated in __init__ instead of a hard-coded 0.04
_lowerCAmelCase = self.window_size // 2
for y in range(_snake_case , h - offset ):
for x in range(_snake_case , w - offset ):
_lowerCAmelCase = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowerCAmelCase = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowerCAmelCase = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
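                # Harris response for this window: with structure tensor
                # M = [[wxx, wxy], [wxy, wyy]], the response is r = det(M) - k * trace(M) ** 2;
                # a large positive r indicates a corner.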
_lowerCAmelCase = (wxx * wyy) - (wxy**2)
_lowerCAmelCase = wxx + wyy
_lowerCAmelCase = det - k * (trace**2)
                # threshold on the corner response; raise or lower to tune sensitivity
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
A__ = HarrisCorner(0.0_4, 3)
A__ , A__ = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 82
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_SCREAMING_SNAKE_CASE : Optional[int] = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_SCREAMING_SNAKE_CASE : Any = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_A )[0]
@deprecated(_A , '''Please use tf.data to implement this functionality.''' )
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_A ) as bytestream:
SCREAMING_SNAKE_CASE__ = _readaa(_A )
if magic != 20_51:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = bytestream.read(rows * cols * num_images )
SCREAMING_SNAKE_CASE__ = numpy.frombuffer(_A , dtype=numpy.uinta )
SCREAMING_SNAKE_CASE__ = data.reshape(_A , _A , _A , 1 )
return data
@deprecated(_A , '''Please use tf.one_hot on tensors.''' )
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
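    # One-hot encoding via flat indexing: each row i starts at offset i * num_classes
    # in the flattened array, and position offset + label is set to 1.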
SCREAMING_SNAKE_CASE__ = labels_dense.shape[0]
SCREAMING_SNAKE_CASE__ = numpy.arange(_A ) * num_classes
SCREAMING_SNAKE_CASE__ = numpy.zeros((num_labels, num_classes) )
SCREAMING_SNAKE_CASE__ = 1
return labels_one_hot
@deprecated(_A , '''Please use tf.data to implement this functionality.''' )
def UpperCAmelCase_ ( _A , _A=False , _A=10 ):
'''simple docstring'''
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_A ) as bytestream:
SCREAMING_SNAKE_CASE__ = _readaa(_A )
if magic != 20_49:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = bytestream.read(_A )
SCREAMING_SNAKE_CASE__ = numpy.frombuffer(_A , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_A , _A )
return labels
class UpperCAmelCase__ :
"""simple docstring"""
@deprecated(
__lowerCamelCase , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict=False , __lowerCamelCase : Dict=False , __lowerCamelCase : List[str]=dtypes.floataa , __lowerCamelCase : List[str]=True , __lowerCamelCase : Any=None , ) -> List[Any]:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = random_seed.get_seed(__lowerCamelCase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
SCREAMING_SNAKE_CASE__ = dtypes.as_dtype(__lowerCamelCase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
SCREAMING_SNAKE_CASE__ = 1_0000
SCREAMING_SNAKE_CASE__ = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
SCREAMING_SNAKE_CASE__ = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
SCREAMING_SNAKE_CASE__ = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
SCREAMING_SNAKE_CASE__ = images.astype(numpy.floataa )
SCREAMING_SNAKE_CASE__ = numpy.multiply(__lowerCamelCase , 1.0 / 255.0 )
SCREAMING_SNAKE_CASE__ = images
SCREAMING_SNAKE_CASE__ = labels
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
@property
def lowercase_ ( self : Tuple ) -> List[str]:
return self._images
@property
def lowercase_ ( self : List[Any] ) -> Tuple:
return self._labels
@property
def lowercase_ ( self : Tuple ) -> Tuple:
return self._num_examples
@property
def lowercase_ ( self : Optional[int] ) -> int:
return self._epochs_completed
def lowercase_ ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=True ) -> str:
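        # Serve `batch_size` examples, shuffling once per epoch; when a batch crosses
        # an epoch boundary, the tail of the old epoch is concatenated with the head
        # of the freshly shuffled new one.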
if fake_data:
SCREAMING_SNAKE_CASE__ = [1] * 784
SCREAMING_SNAKE_CASE__ = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__lowerCamelCase )],
[fake_label for _ in range(__lowerCamelCase )],
)
SCREAMING_SNAKE_CASE__ = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
SCREAMING_SNAKE_CASE__ = numpy.arange(self._num_examples )
numpy.random.shuffle(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.images[perma]
SCREAMING_SNAKE_CASE__ = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
SCREAMING_SNAKE_CASE__ = self._num_examples - start
SCREAMING_SNAKE_CASE__ = self._images[start : self._num_examples]
SCREAMING_SNAKE_CASE__ = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
SCREAMING_SNAKE_CASE__ = numpy.arange(self._num_examples )
numpy.random.shuffle(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.images[perm]
SCREAMING_SNAKE_CASE__ = self.labels[perm]
# Start next epoch
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = batch_size - rest_num_examples
SCREAMING_SNAKE_CASE__ = self._index_in_epoch
SCREAMING_SNAKE_CASE__ = self._images[start:end]
SCREAMING_SNAKE_CASE__ = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
SCREAMING_SNAKE_CASE__ = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(_A , '''Please write your own downloading logic.''' )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
if not gfile.Exists(_A ):
gfile.MakeDirs(_A )
SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A )
if not gfile.Exists(_A ):
urllib.request.urlretrieve(_A , _A ) # noqa: S310
with gfile.GFile(_A ) as f:
SCREAMING_SNAKE_CASE__ = f.size()
print('''Successfully downloaded''' , _A , _A , '''bytes.''' )
return filepath
@deprecated(
_A , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def UpperCAmelCase_ ( _A , _A=False , _A=False , _A=dtypes.floataa , _A=True , _A=50_00 , _A=None , _A=DEFAULT_SOURCE_URL , ):
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=_A , one_hot=_A , dtype=_A , seed=_A )
SCREAMING_SNAKE_CASE__ = fake()
SCREAMING_SNAKE_CASE__ = fake()
SCREAMING_SNAKE_CASE__ = fake()
return _Datasets(train=_A , validation=_A , test=_A )
if not source_url: # empty string check
SCREAMING_SNAKE_CASE__ = DEFAULT_SOURCE_URL
SCREAMING_SNAKE_CASE__ = '''train-images-idx3-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = '''train-labels-idx1-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = '''t10k-images-idx3-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = '''t10k-labels-idx1-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + train_images_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_images(_A )
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + train_labels_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_labels(_A , one_hot=_A )
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + test_images_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_images(_A )
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + test_labels_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_labels(_A , one_hot=_A )
if not 0 <= validation_size <= len(_A ):
SCREAMING_SNAKE_CASE__ = (
'''Validation size should be between 0 and '''
F'''{len(_A )}. Received: {validation_size}.'''
)
raise ValueError(_A )
SCREAMING_SNAKE_CASE__ = train_images[:validation_size]
SCREAMING_SNAKE_CASE__ = train_labels[:validation_size]
SCREAMING_SNAKE_CASE__ = train_images[validation_size:]
SCREAMING_SNAKE_CASE__ = train_labels[validation_size:]
SCREAMING_SNAKE_CASE__ = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
SCREAMING_SNAKE_CASE__ = _DataSet(_A , _A , **_A )
SCREAMING_SNAKE_CASE__ = _DataSet(_A , _A , **_A )
SCREAMING_SNAKE_CASE__ = _DataSet(_A , _A , **_A )
return _Datasets(train=_A , validation=_A , test=_A )
| 314
| 0
|
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
snake_case_ : Dict = logging.get_logger(__name__)
snake_case_ : Dict = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
snake_case_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def A__ ( UpperCAmelCase_ ):
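    # Resolve a class name to an image processor class: check the static mapping,
    # then any processors registered at runtime, and finally the top-level
    # `transformers` module so missing optional dependencies raise their usual
    # dummy-object error.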
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_UpperCamelCase : Any = model_type_to_module_name(UpperCAmelCase_ )
_UpperCamelCase : List[str] = importlib.import_module(f'.{module_name}' , 'transformers.models' )
try:
return getattr(UpperCAmelCase_ , UpperCAmelCase_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(UpperCAmelCase_ , '__name__' , UpperCAmelCase_ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_UpperCamelCase : List[Any] = importlib.import_module('transformers' )
if hasattr(UpperCAmelCase_ , UpperCAmelCase_ ):
return getattr(UpperCAmelCase_ , UpperCAmelCase_ )
return None
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = False , **UpperCAmelCase_ , ):
_UpperCamelCase : Optional[Any] = get_file_from_repo(
UpperCAmelCase_ , UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , force_download=UpperCAmelCase_ , resume_download=UpperCAmelCase_ , proxies=UpperCAmelCase_ , use_auth_token=UpperCAmelCase_ , revision=UpperCAmelCase_ , local_files_only=UpperCAmelCase_ , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(UpperCAmelCase_ , encoding='utf-8' ) as reader:
return json.load(UpperCAmelCase_ )
class lowercase__ :
def __init__( self : Tuple ):
'''simple docstring'''
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(lowerCamelCase__ )
def UpperCamelCase_ ( cls : Tuple ,lowerCamelCase__ : Tuple ,**lowerCamelCase__ : Tuple ):
'''simple docstring'''
_UpperCamelCase : int = kwargs.pop('config' ,lowerCamelCase__ )
_UpperCamelCase : List[Any] = kwargs.pop('trust_remote_code' ,lowerCamelCase__ )
_UpperCamelCase : Tuple = True
_UpperCamelCase , _UpperCamelCase : Dict = ImageProcessingMixin.get_image_processor_dict(lowerCamelCase__ ,**lowerCamelCase__ )
_UpperCamelCase : List[Any] = config_dict.get('image_processor_type' ,lowerCamelCase__ )
_UpperCamelCase : Tuple = None
if "AutoImageProcessor" in config_dict.get('auto_map' ,{} ):
_UpperCamelCase : Optional[int] = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_UpperCamelCase : Optional[Any] = config_dict.pop('feature_extractor_type' ,lowerCamelCase__ )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
_UpperCamelCase : int = feature_extractor_class.replace('FeatureExtractor' ,'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' ,{} ):
_UpperCamelCase : List[Any] = config_dict['auto_map']['AutoFeatureExtractor']
_UpperCamelCase : List[str] = feature_extractor_auto_map.replace('FeatureExtractor' ,'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : int = AutoConfig.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
# It could be in `config.image_processor_type``
_UpperCamelCase : List[Any] = getattr(lowerCamelCase__ ,'image_processor_type' ,lowerCamelCase__ )
if hasattr(lowerCamelCase__ ,'auto_map' ) and "AutoImageProcessor" in config.auto_map:
_UpperCamelCase : List[str] = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
_UpperCamelCase : Union[str, Any] = image_processor_class_from_name(lowerCamelCase__ )
_UpperCamelCase : str = image_processor_auto_map is not None
_UpperCamelCase : Any = image_processor_class is not None or type(lowerCamelCase__ ) in IMAGE_PROCESSOR_MAPPING
_UpperCamelCase : Any = resolve_trust_remote_code(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
if has_remote_code and trust_remote_code:
_UpperCamelCase : str = get_class_from_dynamic_module(
lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = kwargs.pop('code_revision' ,lowerCamelCase__ )
if os.path.isdir(lowerCamelCase__ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(lowerCamelCase__ ,**lowerCamelCase__ )
elif image_processor_class is not None:
return image_processor_class.from_dict(lowerCamelCase__ ,**lowerCamelCase__ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(lowerCamelCase__ ) in IMAGE_PROCESSOR_MAPPING:
_UpperCamelCase : int = IMAGE_PROCESSOR_MAPPING[type(lowerCamelCase__ )]
return image_processor_class.from_dict(lowerCamelCase__ ,**lowerCamelCase__ )
raise ValueError(
F'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '
F'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '
F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}' )
@staticmethod
def UpperCamelCase_ ( lowerCamelCase__ : List[str] ,lowerCamelCase__ : Union[str, Any] ):
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(lowerCamelCase__ ,lowerCamelCase__ )
| 83
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
_SCREAMING_SNAKE_CASE : str = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
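    # SQuAD-style scoring: compare the prediction against every reference answer
    # and keep the best score.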
return max(metric_fn(_A , _A ) for gt in ground_truths )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE__ = pd.read_csv(_A , sep='''\t''' , header=_A )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE__ = ast.literal_eval(_A )
answers.append(_A )
else:
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = [[reference] for reference in references]
    SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = 0  # f1 sum, exact-match sum, total examples
for prediction, ground_truths in zip(_A , _A ):
total += 1
em += metric_max_over_ground_truths(_A , _A , _A )
fa += metric_max_over_ground_truths(_A , _A , _A )
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * em / total
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * fa / total
logger.info(F'''F1: {fa:.2f}''' )
logger.info(F'''EM: {em:.2f}''' )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = args.k
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = 0
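    # Precision@k: the fraction of the top-k retrieved passage titles that appear
    # in the gold provenance set, averaged over all queries.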
for hypo, reference in zip(_A , _A ):
SCREAMING_SNAKE_CASE__ = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE__ = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * em / total
logger.info(F'''Precision@{k}: {em: .2f}''' )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
def strip_title(_A ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE__ = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE__ = title[:-1]
return title
SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_A , return_tensors='''pt''' , padding=_A , truncation=_A , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE__ = rag_model.rag.question_encoder(_A )
SCREAMING_SNAKE_CASE__ = question_enc_outputs[0]
SCREAMING_SNAKE_CASE__ = rag_model.retriever(
_A , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE__ = []
for docs in all_docs:
SCREAMING_SNAKE_CASE__ = [strip_title(_A ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(_A ) )
return provenance_strings
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_A , return_tensors='''pt''' , padding=_A , truncation=_A )
SCREAMING_SNAKE_CASE__ = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE__ = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE__ = rag_model.generate( # rag_model overwrites generate
_A , attention_mask=_A , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_A , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE__ = rag_model.retriever.generator_tokenizer.batch_decode(_A , skip_special_tokens=_A )
if args.print_predictions:
for q, a in zip(_A , _A ):
logger.info('''Q: {} - A: {}'''.format(_A , _A ) )
return answers
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=_A , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=_A , choices=['''exact''', '''compressed''', '''legacy'''] , type=_A , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=_A , type=_A , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=_A , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=_A , type=_A , required=_A , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=_A , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=_A , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=_A , type=_A , required=_A , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=_A , type=_A , required=_A , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=_A , choices=['''qa''', '''ans'''] , help=(
            '''Format of the gold data file: '''
            '''qa - a single line in the following format: question [tab] answer_list; '''
            '''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=_A , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints that share the model_name prefix and end with a step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=_A , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=_A , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=_A , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=_A , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE__ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE__ = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE__ = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE__ = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE__ = args.index_path
else:
SCREAMING_SNAKE_CASE__ = BartForConditionalGeneration
SCREAMING_SNAKE_CASE__ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , _A )
SCREAMING_SNAKE_CASE__ = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE__ = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(_A , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(_A ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE__ = RagRetriever.from_pretrained(_A , **_A )
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_A , retriever=_A , **_A )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_A , **_A )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE__ = []
for line in tqdm(_A ):
questions.append(line.strip() )
if len(_A ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_A , _A , _A )
preds_file.write('''\n'''.join(_A ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE__ = []
if len(_A ) > 0:
SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_A , _A , _A )
preds_file.write('''\n'''.join(_A ) )
preds_file.flush()
score_fn(_A , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : int = get_args()
main(args)
| 314
| 0
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'spiece.model'}
__UpperCAmelCase = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A=False , __A=True , __A=False , __A="<s>" , __A="</s>" , __A="<unk>" , __A="<sep>" , __A="<pad>" , __A="<cls>" , __A="<mask>" , __A=["<eop>", "<eod>"] , __A = None , **__A , ) -> None:
lowerCAmelCase_ :int = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
lowerCAmelCase_ :Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__A , remove_space=__A , keep_accents=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , additional_special_tokens=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
lowerCAmelCase_ :List[Any] = 3
lowerCAmelCase_ :Any = do_lower_case
lowerCAmelCase_ :Any = remove_space
lowerCAmelCase_ :Dict = keep_accents
lowerCAmelCase_ :List[Any] = vocab_file
lowerCAmelCase_ :Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
lowerCAmelCase_ :Dict = jieba
lowerCAmelCase_ :List[str] = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def __lowerCAmelCase ( self ) -> Any:
return len(self.sp_model )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Dict = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[Any]:
lowerCAmelCase_ :Union[str, Any] = self.__dict__.copy()
lowerCAmelCase_ :Optional[int] = None
return state
def __setstate__( self , __A ) -> Tuple:
lowerCAmelCase_ :List[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase_ :List[Any] = {}
lowerCAmelCase_ :List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self , __A ) -> List[str]:
if self.remove_space:
lowerCAmelCase_ :int = """ """.join(inputs.strip().split() )
else:
lowerCAmelCase_ :Any = inputs
lowerCAmelCase_ :Dict = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
lowerCAmelCase_ :int = unicodedata.normalize("""NFKD""" , __A )
lowerCAmelCase_ :List[Any] = """""".join([c for c in outputs if not unicodedata.combining(__A )] )
if self.do_lower_case:
lowerCAmelCase_ :Union[str, Any] = outputs.lower()
return outputs
def __lowerCAmelCase ( self , __A ) -> List[str]:
lowerCAmelCase_ :Dict = self.preprocess_text(__A )
lowerCAmelCase_ :Tuple = self.sp_model.encode(__A , out_type=__A )
lowerCAmelCase_ :List[str] = []
for piece in pieces:
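            # Mirror XLNet's tokenizer workaround: a piece like "2," is re-tokenized
            # so the digits and the trailing comma become separate pieces.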
if len(__A ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
lowerCAmelCase_ :Optional[int] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__A , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase_ :List[str] = cur_pieces[1:]
else:
lowerCAmelCase_ :List[Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__A )
else:
new_pieces.append(__A )
return new_pieces
def __lowerCAmelCase ( self , __A ) -> Any:
return self.sp_model.PieceToId(__A )
def __lowerCAmelCase ( self , __A ) -> str:
return self.sp_model.IdToPiece(__A )
def __lowerCAmelCase ( self , __A ) -> int:
lowerCAmelCase_ :Dict = """""".join(__A ).replace(__A , """ """ ).strip()
return out_string
def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]:
lowerCAmelCase_ :str = [self.sep_token_id]
lowerCAmelCase_ :List[str] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCAmelCase ( self , __A , __A = None , __A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is not None:
return ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1, 1]
return ([0] * len(__A )) + [1, 1]
def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]:
lowerCAmelCase_ :List[str] = [self.sep_token_id]
lowerCAmelCase_ :Dict = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase_ :Union[str, Any] = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , """wb""" ) as fi:
lowerCAmelCase_ :Tuple = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def __lowerCAmelCase ( self , *__A , **__A ) -> List[Any]:
lowerCAmelCase_ :Union[str, Any] = super()._decode(*__A , **__A )
lowerCAmelCase_ :Any = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 84
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=7 , __lowerCamelCase : Any=3 , __lowerCamelCase : Any=30 , __lowerCamelCase : str=400 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[int]=[0.5, 0.5, 0.5] , __lowerCamelCase : Tuple=[0.5, 0.5, 0.5] , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=1 / 255 , __lowerCamelCase : Dict=True , ) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = min_resolution
SCREAMING_SNAKE_CASE__ = max_resolution
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean
SCREAMING_SNAKE_CASE__ = image_std
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor
SCREAMING_SNAKE_CASE__ = do_pad
def lowercase_ ( self : Tuple ) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=False ) -> Optional[int]:
if not batched:
SCREAMING_SNAKE_CASE__ = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image.size
else:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image.shape[1], image.shape[2]
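            # Aspect-preserving resize: the shorter side is scaled to
            # size["shortest_edge"] and the longer side proportionally.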
if w < h:
SCREAMING_SNAKE_CASE__ = int(self.size['''shortest_edge'''] * h / w )
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
elif w > h:
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE__ = int(self.size['''shortest_edge'''] * w / h )
else:
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
else:
SCREAMING_SNAKE_CASE__ = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = YolosImageProcessor if is_vision_available() else None
def lowercase_ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = YolosImageProcessingTester(self )
@property
def lowercase_ ( self : Tuple ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
def lowercase_ ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCamelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def lowercase_ ( self : Tuple ) -> Optional[int]:
pass
def lowercase_ ( self : int ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Tuple ) -> str:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Dict ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : List[str] ) -> Optional[Any]:
# Initialize image_processings
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE__ = self.image_processing_class(do_resize=__lowerCamelCase , do_normalize=__lowerCamelCase , do_rescale=__lowerCamelCase )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
SCREAMING_SNAKE_CASE__ = image_processing_a.pad(__lowerCamelCase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ = image_processing_a(__lowerCamelCase , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self ) -> Optional[int]:
        # prepare image and target
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
            target = json.loads(f.read() )
        target = {'''image_id''': 3_9769, '''annotations''': target}
        # encode them
        image_processing = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
        encoding = image_processing(images=image , annotations=target , return_tensors='''pt''' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['''pixel_values'''].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , expected_size ) )
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self ) -> Optional[Any]:
        # prepare image, target and masks_path
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
            target = json.loads(f.read() )
        target = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
        masks_path = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
        # encode them
        image_processing = YolosImageProcessor(format='''coco_panoptic''' )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors='''pt''' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['''pixel_values'''].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 82_2873
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , expected_size ) )
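# The `boxes` values verified above are COCO (x, y, w, h) pixel boxes converted to
# normalized (cx, cy, w, h). A minimal standalone sketch of that conversion; the
# helper name `coco_to_center_format` is illustrative, not part of the library:
import torch

def coco_to_center_format(boxes: torch.Tensor, img_w: int, img_h: int) -> torch.Tensor:
    # boxes: (N, 4) in (x_min, y_min, width, height) pixel coordinates
    x, y, w, h = boxes.unbind(-1)
    centers = torch.stack([x + 0.5 * w, y + 0.5 * h, w, h], dim=-1)
    # divide by image size to get values in [0, 1]
    return centers / torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)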
| 314
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Tuple = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Tuple = "xmod"
def __init__( self , a__=30_522 , a__=768 , a__=12 , a__=12 , a__=3_072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=2 , a__=0.0_2 , a__=1e-12 , a__=1 , a__=0 , a__=2 , a__="absolute" , a__=True , a__=None , a__=False , a__=2 , a__=False , a__=True , a__=True , a__=("en_XX",) , a__=None , **a__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_act
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = position_embedding_type
snake_case_ = use_cache
snake_case_ = classifier_dropout
snake_case_ = pre_norm
snake_case_ = adapter_reduction_factor
snake_case_ = adapter_layer_norm
snake_case_ = adapter_reuse_layer_norm
snake_case_ = ln_before_adapter
snake_case_ = list(a__ )
snake_case_ = default_language
class _snake_case ( lowercase_ ):
@property
def lowerCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
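# A minimal sketch of how a dynamic-axis mapping like the one returned above is
# consumed by torch.onnx.export. The model, input names, and output file used in
# the commented call are illustrative assumptions, not part of this config file:
import torch

dynamic_axis = {0: "batch", 1: "sequence"}
dummy_input_ids = torch.zeros(1, 8, dtype=torch.long)  # (batch, sequence)
# torch.onnx.export(model, (dummy_input_ids,), "model.onnx",
#                   input_names=["input_ids"],
#                   dynamic_axes={"input_ids": dynamic_axis})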
| 85
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "roberta-prelayernorm"
def __init__( self : Optional[Any] , __lowerCamelCase : List[Any]=5_0265 , __lowerCamelCase : str=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : str=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Dict=512 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : List[Any]=1e-12 , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : Any=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : List[str]="absolute" , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Dict=None , **__lowerCamelCase : Optional[int] , ) -> Optional[Any]:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = position_embedding_type
SCREAMING_SNAKE_CASE__ = use_cache
SCREAMING_SNAKE_CASE__ = classifier_dropout
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
@property
def lowercase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 314
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput ):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin , ConfigMixin ):
    order = 2
    @register_to_config
    def __init__(self , sigma_min : float = 0.02 , sigma_max : float = 1_00 , s_noise : float = 1.007 , s_churn : float = 80 , s_min : float = 0.05 , s_max : float = 50 , ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)
    def scale_model_input(self , sample , timestep = None ):
        return sample

    def set_timesteps(self , num_inference_steps , device = None ):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0 , self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule , dtype=torch.float32 , device=device )
    def add_noise_to_input(self , sample , sigma , generator = None ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape , generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step(self , model_output , sigma_hat , sigma_prev , sample_hat , return_dict = True , ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )

    def step_correct(self , model_output , sigma_hat , sigma_prev , sample_hat , sample_prev , derivative , return_dict = True , ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )

    def add_noise(self , original_samples , noise , timesteps ):
        raise NotImplementedError()
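# A standalone sketch of the update rules implemented above, using plain floats
# instead of the scheduler class. The sigma schedule mirrors `set_timesteps`;
# `euler_step` mirrors `step`, and a second model evaluation at the new point
# (as in `step_correct`) would turn it into Heun's method. Values are illustrative.
sigma_max, sigma_min, num_steps = 80.0, 0.02, 50
schedule = [
    sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_steps - 1))
    for i in range(num_steps)
]

def euler_step(sample_hat, sigma_hat, sigma_prev, model_output):
    # reconstruct the model's estimate of the clean sample, then move toward it
    pred_original = sample_hat + sigma_hat * model_output
    derivative = (sample_hat - pred_original) / sigma_hat
    return sample_hat + (sigma_prev - sigma_hat) * derivative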
| 86
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "lxmert"
a = {}
def __init__( self : Union[str, Any] , __lowerCamelCase : List[str]=3_0522 , __lowerCamelCase : Union[str, Any]=768 , __lowerCamelCase : Dict=12 , __lowerCamelCase : Union[str, Any]=9500 , __lowerCamelCase : Union[str, Any]=1600 , __lowerCamelCase : Any=400 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=512 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : Any=1e-12 , __lowerCamelCase : List[Any]=9 , __lowerCamelCase : Any=5 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Optional[Any]=2048 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : List[str]=6.67 , __lowerCamelCase : Dict=True , __lowerCamelCase : Any=True , __lowerCamelCase : Any=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Any=True , **__lowerCamelCase : Optional[Any] , ) -> Any:
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = num_qa_labels
SCREAMING_SNAKE_CASE__ = num_object_labels
SCREAMING_SNAKE_CASE__ = num_attr_labels
SCREAMING_SNAKE_CASE__ = l_layers
SCREAMING_SNAKE_CASE__ = x_layers
SCREAMING_SNAKE_CASE__ = r_layers
SCREAMING_SNAKE_CASE__ = visual_feat_dim
SCREAMING_SNAKE_CASE__ = visual_pos_dim
SCREAMING_SNAKE_CASE__ = visual_loss_normalizer
SCREAMING_SNAKE_CASE__ = task_matched
SCREAMING_SNAKE_CASE__ = task_mask_lm
SCREAMING_SNAKE_CASE__ = task_obj_predict
SCREAMING_SNAKE_CASE__ = task_qa
SCREAMING_SNAKE_CASE__ = visual_obj_loss
SCREAMING_SNAKE_CASE__ = visual_attr_loss
SCREAMING_SNAKE_CASE__ = visual_feat_loss
SCREAMING_SNAKE_CASE__ = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**__lowerCamelCase )
| 314
| 0
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    '''vocab_size''': len(tokenizer),
    '''scale_attn_by_inverse_layer_idx''': True,
    '''reorder_and_upcast_attn''': True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
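# A hedged example of invoking this script from the command line. The flag names
# mirror the attributes read above, but the exact `InitializationArguments`
# field names are an assumption of this sketch:
#
#   python initialize_model.py \
#       --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot \
#       --model_name codeparrot-model \
#       --push_to_hub True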
| 87
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : str = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
_SCREAMING_SNAKE_CASE : Dict = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
_SCREAMING_SNAKE_CASE : Optional[int] = {
'''vinai/phobert-base''': 256,
'''vinai/phobert-large''': 256,
}
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = set()
SCREAMING_SNAKE_CASE__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ = char
SCREAMING_SNAKE_CASE__ = set(_A )
return pairs
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : List[str]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , **__lowerCamelCase : Optional[int] , ) -> Union[str, Any]:
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = vocab_file
SCREAMING_SNAKE_CASE__ = merges_file
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 3
self.add_from_file(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.encoder.items()}
with open(__lowerCamelCase , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE__ = merges_handle.read().split('''\n''' )[:-1]
SCREAMING_SNAKE_CASE__ = [tuple(merge.split()[:-1] ) for merge in merges]
SCREAMING_SNAKE_CASE__ = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
        self.cache = {}

    def build_inputs_with_special_tokens(self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences(self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    @property
    def vocab_size(self ) -> int:
        return len(self.encoder )

    def get_vocab(self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = '''@@ '''.join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self , text ):
        split_tokens = []
        words = re.findall(r'''\S+\n?''' , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
        return split_tokens

    def _convert_token_to_id(self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token(self , index ):
        return self.decoder.get(index , self.unk_token )

    def convert_tokens_to_string(self , tokens ):
        out_string = ''' '''.join(tokens ).replace('''@@ ''' , '''''' ).strip()
        return out_string
    def save_vocabulary(self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        out_merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        if os.path.abspath(self.merges_file ) != os.path.abspath(out_merge_file ):
            copyfile(self.merges_file , out_merge_file )
        return out_vocab_file, out_merge_file
    def add_from_file(self , f ):
        if isinstance(f , str ):
            try:
                with open(f , '''r''' , encoding='''utf-8''' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(''' ''' )
            if idx == -1:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
            word = line[:idx]
            self.encoder[word] = len(self.encoder )
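# A toy, self-contained illustration of the merge loop in `bpe` above: given
# merge ranks, repeatedly fuse the best-ranked adjacent pair. The tiny rank
# table is invented for the example and is not a real PhoBERT merges file.
def toy_bpe(token: str, ranks: dict) -> str:
    word = list(token)
    while len(word) > 1:
        # rank every adjacent pair; un-ranked pairs get infinity
        pairs = [(ranks.get((a, b), float("inf")), i) for i, (a, b) in enumerate(zip(word, word[1:]))]
        best_rank, i = min(pairs)
        if best_rank == float("inf"):
            break
        word[i : i + 2] = [word[i] + word[i + 1]]  # fuse the best pair
    return " ".join(word)

print(toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1}))  # -> "low e r"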
| 314
| 0
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase ):
    def __init__(self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )

    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
def _lowercase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model(self , config , pixel_values ):
        model = FlaxRegNetModel(config=config )
        result = model(pixel_values )
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def create_and_check_for_image_classification(self , config , pixel_values ):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ) -> None:
        self.model_tester = FlaxRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
def _lowercase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def _lowercase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def _lowercase ( self : Dict ) -> Dict:
"""simple docstring"""
pass
    def test_forward_signature(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output(self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_jit_compilation(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )

                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self ):
        model = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""np""" )
        outputs = model(**inputs )
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
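# The JIT test above follows a common JAX pattern: run the same function once
# under jax.jit and once with jit disabled, then compare outputs. A minimal
# standalone sketch of that pattern with a trivial function:
import jax
import jax.numpy as jnp

@jax.jit
def scale(x):
    return 2.0 * x  # stand-in for a model forward pass

x = jnp.ones((2, 3))
jitted = scale(x)
with jax.disable_jit():
    eager = scale(x)
assert jitted.shape == eager.shape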
| 88
|
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    '''Returns the greatest product of thirteen adjacent digits in n.'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 314
| 0
|
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin ):
    def __init__(self , controlnets : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
        super().__init__()
        self.nets = nn.ModuleList(controlnets )

    def forward(self , sample : torch.FloatTensor , timestep : Union[torch.Tensor, float, int] , encoder_hidden_states : torch.Tensor , controlnet_cond : List[torch.tensor] , conditioning_scale : List[float] , class_labels : Optional[torch.Tensor] = None , timestep_cond : Optional[torch.Tensor] = None , attention_mask : Optional[torch.Tensor] = None , cross_attention_kwargs : Optional[Dict[str, Any]] = None , guess_mode : bool = False , return_dict : bool = True , ):
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples, mid_sample = controlnet(
                sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def save_pretrained(self , save_directory : Union[str, os.PathLike] , is_main_process : bool = True , save_function : Callable = None , safe_serialization : bool = False , variant : Optional[str] = None , ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
            idx += 1
            model_path_to_save = model_path_to_save + F"""_{idx}"""

    @classmethod
    def from_pretrained(cls , pretrained_model_path : Optional[Union[str, os.PathLike]] , **kwargs ):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + F"""_{idx}"""
        logger.info(F"""{len(controlnets )} controlnets loaded from {pretrained_model_path}.""" )
        if len(controlnets ) == 0:
            raise ValueError(
                F"""No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + '_0'}.""" )
        return cls(controlnets )
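# The merge rule in `forward` above reduces to an element-wise sum of each
# ControlNet's residuals (each net applies its conditioning scale internally).
# A minimal sketch of that reduction with plain tensors; shapes are illustrative:
import torch

down_a = [torch.ones(1, 4, 8, 8)]
down_b = [torch.full((1, 4, 8, 8), 2.0)]
merged = [a + b for a, b in zip(down_a, down_b)]  # per-resolution residual sum
assert torch.allclose(merged[0], torch.full((1, 4, 8, 8), 3.0))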
| 89
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline ):
    def __init__(self , unet , scheduler ):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__(self , batch_size : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta : float = 0.0 , num_inference_steps : int = 50 , use_clipped_model_output : Optional[bool] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size , int ):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )

        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )

        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator ).prev_sample

        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
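# In the loop above, `eta` interpolates between deterministic DDIM (eta=0) and
# DDPM-like stochastic sampling (eta=1). A hedged sketch of the variance term
# sigma_t that eta controls, with alpha-bar values invented for illustration:
import math

alpha_bar_t, alpha_bar_prev, eta = 0.5, 0.7, 1.0
sigma_t = eta * math.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar_t)) * math.sqrt(1 - alpha_bar_t / alpha_bar_prev)
print(f"{sigma_t=:.4f}")  # sigma_t is 0 when eta=0, i.e. a fully deterministic update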
| 314
| 0
|
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__A = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__A = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
__A = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(s ):
    '''Lower text and remove punctuation, articles and extra whitespace.'''

    def remove_articles(text ):
        regex = re.compile(R'\b(a|an|the)\b' , re.UNICODE )
        return re.sub(regex , ' ' , text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )


def compute_exact(a_gold , a_pred ):
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )


def compute_em(predictions , references ):
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 100
def SARIngram(sgrams , cgrams , rgramslist , numref ):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall )
    sgramcounter = Counter(sgrams )
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams )
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep ) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep )
    if len(keepgramcounterall_rep ) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values() )
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep ) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep )

    # ADDITION
    addgramcounter = set(cgramcounter ) - set(sgramcounter )
    addgramcountergood = set(addgramcounter ) & set(rgramcounter )
    addgramcounterall = set(rgramcounter ) - set(sgramcounter )
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter ) > 0:
        addscore_precision = addtmpscore / len(addgramcounter )
    if len(addgramcounterall ) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall )
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent , csent , rsents ):
    numref = len(rsents )
    s1grams = ssent.split(' ' )
    c1grams = csent.split(' ' )
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(' ' )
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams )
        for i in range(0 , len(r1grams ) - 1 ):
            if i < len(r1grams ) - 1:
                r2gram = r1grams[i] + ' ' + r1grams[i + 1]
                r2grams.append(r2gram )
            if i < len(r1grams ) - 2:
                r3gram = r1grams[i] + ' ' + r1grams[i + 1] + ' ' + r1grams[i + 2]
                r3grams.append(r3gram )
            if i < len(r1grams ) - 3:
                r4gram = r1grams[i] + ' ' + r1grams[i + 1] + ' ' + r1grams[i + 2] + ' ' + r1grams[i + 3]
                r4grams.append(r4gram )
        r2gramslist.append(r2grams )
        r3gramslist.append(r3grams )
        r4gramslist.append(r4grams )

    for i in range(0 , len(s1grams ) - 1 ):
        if i < len(s1grams ) - 1:
            s2gram = s1grams[i] + ' ' + s1grams[i + 1]
            s2grams.append(s2gram )
        if i < len(s1grams ) - 2:
            s3gram = s1grams[i] + ' ' + s1grams[i + 1] + ' ' + s1grams[i + 2]
            s3grams.append(s3gram )
        if i < len(s1grams ) - 3:
            s4gram = s1grams[i] + ' ' + s1grams[i + 1] + ' ' + s1grams[i + 2] + ' ' + s1grams[i + 3]
            s4grams.append(s4gram )

    for i in range(0 , len(c1grams ) - 1 ):
        if i < len(c1grams ) - 1:
            c2gram = c1grams[i] + ' ' + c1grams[i + 1]
            c2grams.append(c2gram )
        if i < len(c1grams ) - 2:
            c3gram = c1grams[i] + ' ' + c1grams[i + 1] + ' ' + c1grams[i + 2]
            c3grams.append(c3gram )
        if i < len(c1grams ) - 3:
            c4gram = c1grams[i] + ' ' + c1grams[i + 1] + ' ' + c1grams[i + 2] + ' ' + c1grams[i + 3]
            c4grams.append(c4gram )

    (keep1score, del1score, add1score) = SARIngram(s1grams , c1grams , r1gramslist , numref )
    (keep2score, del2score, add2score) = SARIngram(s2grams , c2grams , r2gramslist , numref )
    (keep3score, del3score, add3score) = SARIngram(s3grams , c3grams , r3gramslist , numref )
    (keep4score, del4score, add4score) = SARIngram(s4grams , c4grams , r4gramslist , numref )
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score] ) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score] ) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score] ) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence , lowercase : bool = True , tokenizer : str = "13a" , return_str : bool = True ):
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence , return_str=True , escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence , return_str=True )
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent


def compute_sari(sources , predictions , references ):
    if not (len(sources ) == len(predictions ) == len(references )):
        raise ValueError('Sources length must match predictions and references lengths.' )
    sari_score = 0
    for src, pred, refs in zip(sources , predictions , references ):
        sari_score += SARIsent(normalize(src ) , normalize(pred ) , [normalize(sent ) for sent in refs] )
    sari_score = sari_score / len(predictions )
    return 100 * sari_score


def compute_sacrebleu(predictions , references , smooth_method="exp" , smooth_value=None , force=False , lowercase=False , use_effective_order=False , ):
    references_per_prediction = len(references[0] )
    if any(len(refs ) != references_per_prediction for refs in references ):
        raise ValueError('Sacrebleu requires the same number of references for each prediction' )
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
    output = sacrebleu.corpus_bleu(
        predictions , transformed_references , smooth_method=smooth_method , smooth_value=smooth_value , force=force , lowercase=lowercase , use_effective_order=use_effective_order , )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info(self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=[
'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
'https://github.com/cocoxu/simplification/blob/master/SARI.py',
'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
'https://github.com/mjpost/sacreBLEU',
] , reference_urls=[
'https://www.aclweb.org/anthology/Q16-1029.pdf',
'https://github.com/mjpost/sacreBLEU',
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self , sources , predictions , references ):
        '''simple docstring'''
        result = {}
        result.update({'sari': compute_sari(sources=sources , predictions=predictions , references=references )} )
        result.update({'sacrebleu': compute_sacrebleu(predictions=predictions , references=references )} )
        result.update({'exact': compute_em(predictions=predictions , references=references )} )
        return result
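# A usage sketch matching the docstring example above; shown commented out since
# `datasets.load_metric` downloads the metric script at call time:
# import datasets
# wiki_split = datasets.load_metric("wiki_split")
# results = wiki_split.compute(
#     sources=["About 95 species are currently accepted ."],
#     predictions=["About 95 you now get in ."],
#     references=[["About 95 species are currently known ."]],
# )
# print(results)  # {'sari': 21.8055..., 'sacrebleu': 14.5357..., 'exact': 0.0}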
| 90
|
from ...configuration_utils import PretrainedConfig
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "tapas"
    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02,
        layer_norm_eps=1e-12, pad_token_id=0, positive_label_weight=10.0,
        num_aggregation_labels=0, aggregation_loss_weight=1.0, use_answer_as_supervision=None,
        answer_loss_importance=1.0, use_normalized_answer_loss=False, huber_loss_delta=None,
        temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False, average_approximation_function="ratio",
        cell_selection_preference=None, answer_loss_cutoff=None, max_num_rows=64,
        max_num_columns=32, average_logits_per_cell=False, select_one_column=True,
        allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True, disable_per_token_loss=False,
        aggregation_labels=None, no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
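# Usage sketch (added for illustration; a hedged example, not part of the original file).
# Assuming this class is the `TapasConfig` exported by `transformers`, a WTQ-style
# weak-supervision setup could be declared like this:
if __name__ == "__main__":
    demo_config = TapasConfig(
        num_aggregation_labels=4,
        aggregation_labels={0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"},
        use_answer_as_supervision=True,
    )
    print(demo_config.aggregation_labels[1])  # "SUM"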
| 314
| 0
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    """Wraps a Whisper feature extractor and a Whisper tokenizer into a single processor."""

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors: str = "np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
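# Usage sketch (my addition; assumes this class is the `WhisperProcessor` shipped with
# `transformers` and that a 16 kHz mono waveform is available):
if __name__ == "__main__":
    import numpy as np
    from transformers import WhisperProcessor

    processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
    waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence
    features = processor(audio=waveform, sampling_rate=16_000, return_tensors="np")
    print(features.input_features.shape)  # (1, 80, 3000) log-mel frames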
| 91
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = 0
@slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)
    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)
    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)
@require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)
    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")
@require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)
@require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")
    def test_model_name_edge_cases_in_mappings(self):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)
@require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)
@require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"

        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])
@require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)
    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)
    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")
    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast because there is no converter from slow to fast
            # for our new tokenizer and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)

            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
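# Recap sketch (my addition): the registration API exercised by the tests above, in
# isolation. `MyConfig` and `MyTokenizer` are hypothetical placeholder classes.
if __name__ == "__main__":
    from transformers import PretrainedConfig

    class MyConfig(PretrainedConfig):
        model_type = "my-model"

    class MyTokenizer(BertTokenizer):
        pass

    AutoConfig.register("my-model", MyConfig)
    AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
    # AutoTokenizer.from_pretrained(...) now resolves MyConfig checkpoints to MyTokenizer.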
| 314
| 0
|
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Solves the multi-process interleaved print problem: lock on this file while printing."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
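# Companion sketch (my addition): a single-process, CPU-only variant using the gloo
# backend, handy for checking torch.distributed itself before debugging NCCL. Run it
# as a standalone snippet, not as part of this script:
#
# import torch
# import torch.distributed as dist
# dist.init_process_group("gloo", init_method="tcp://127.0.0.1:29500", rank=0, world_size=1)
# t = torch.ones(1)
# dist.all_reduce(t, op=dist.ReduceOp.SUM)
# assert t.item() == 1.0
# dist.destroy_process_group()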
| 92
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
"""simple docstring"""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
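# Usage sketch (my addition; a hedged example of the processor outside the test suite,
# assuming a `bert-base-uncased` tokenizer and default ViT image settings):
if __name__ == "__main__":
    processor = VisionTextDualEncoderProcessor(
        tokenizer=BertTokenizer.from_pretrained("bert-base-uncased"),
        image_processor=ViTImageProcessor(),
    )
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    batch = processor(text=["a photo"], images=image, return_tensors="np", padding=True)
    print(sorted(batch.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids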
| 314
| 0
|
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    """Return the first ``n_term`` terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
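    # Companion sketch (my addition): the exact numeric value of the same partial sum,
    # computed with fractions.Fraction rather than returned as strings.
    from fractions import Fraction

    def harmonic_sum(n: int) -> Fraction:
        """Return H_n = 1 + 1/2 + ... + 1/n as an exact fraction."""
        return sum(Fraction(1, k) for k in range(1, n + 1))

    print(harmonic_sum(4))  # 25/12 == 1 + 1/2 + 1/3 + 1/4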
| 93
|
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Nezha model."""

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"
    def __init__(
        self,
        vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, max_relative_position=64,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout=0.1,
        pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
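# Usage sketch (my addition; assumes this module is importable as part of `transformers`):
if __name__ == "__main__":
    config = NezhaConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
    print(config.max_relative_position)  # 64: Nezha's relative-position window default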
| 314
| 0
|
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
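# Usage sketch (my addition): the same helper applied outside the test suite.
# `patch_submodule` temporarily rebinds an attribute reachable from a module, here
# replacing os.path.join inside the `_test_patching` module only.
if __name__ == "__main__":
    fake_join = lambda *parts: "/".join(("PATCHED",) + parts)
    with patch_submodule(_test_patching, "os.path.join", fake_join):
        print(_test_patching.os.path.join("a", "b"))  # PATCHED/a/b
    print(_test_patching.os.path.join("a", "b"))  # a/b again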
| 94
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer backed by HuggingFace tokenizers, with a custom Jieba pre-tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
        sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
        tokenize_chinese_chars=True, strip_accents=None, **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs,
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
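# Usage sketch (my addition; assumes the checkpoint below exists on the Hub, as the
# URL maps at the top of this file suggest):
if __name__ == "__main__":
    tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    print(tok.tokenize("今天天气非常好。"))  # Jieba-style word pieces rather than raw characters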
| 314
| 0
|
UpperCAmelCase : str = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
UpperCAmelCase : List[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
UpperCAmelCase : List[Any] = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
UpperCAmelCase : Any = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
UpperCAmelCase : Optional[int] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
UpperCAmelCase : int = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
UpperCAmelCase : List[str] = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
UpperCAmelCase : Optional[int] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
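# Note (my addition): the lists above appear to be hand-picked denoising-timestep
# schedules of varying density, each strictly decreasing from 999 to 0. A quick
# sanity check for any such schedule:
def is_valid_schedule(timesteps: list) -> bool:
    """True if the schedule is strictly decreasing and terminates at 0."""
    return timesteps[-1] == 0 and all(a > b for a, b in zip(timesteps, timesteps[1:]))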
| 95
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    """Configuration class for the TrajectoryTransformer model."""

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249,
        action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128,
        embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006,
        max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12,
        kaiming_initializer_range=1, use_cache=True, pad_token_id=1,
        bos_token_id=50256, eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
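# Usage sketch (my addition): the attribute_map above lets the config answer generic
# `hidden_size`-style queries via its GPT-style fields.
if __name__ == "__main__":
    config = TrajectoryTransformerConfig(n_embd=256, n_layer=6)
    print(config.hidden_size, config.num_hidden_layers)  # 256 6, resolved through attribute_map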
| 314
| 0
|
"""simple docstring"""
def interpolation_search(sorted_collection: list, item: int):
    """Search ``item`` in an ascending ``sorted_collection``; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid division by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection: list, item: int, left: int, right: int):
    """Pure recursive variant; ``left`` and ``right`` bound the slice under inspection."""
    # avoid division by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
import sys
lowercase__ = 0
if debug == 1:
lowercase__ = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("""Sequence must be ascending sorted to apply interpolation search""")
lowercase__ = 67
lowercase__ = interpolation_search(collection, target)
if result is not None:
print(F"{target} found at positions: {result}")
else:
print("""Not found""")
| 96
|
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient over 2..limit via a prime sieve."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"{solution() = }")
| 314
| 0
|
'''simple docstring'''
UNIT_SYMBOL = {
'''meter''': '''m''',
'''kilometer''': '''km''',
'''megametre''': '''Mm''',
'''gigametre''': '''Gm''',
'''terametre''': '''Tm''',
'''petametre''': '''Pm''',
'''exametre''': '''Em''',
'''zettametre''': '''Zm''',
'''yottametre''': '''Ym''',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'''m''': 0,
'''km''': 3,
'''Mm''': 6,
'''Gm''': 9,
'''Tm''': 12,
'''Pm''': 15,
'''Em''': 18,
'''Zm''': 21,
'''Ym''': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a length between metric units, e.g. kilometer -> megametre."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
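    # Usage sketch (my addition):
    # length_conversion(4, "kilometer", "megametre") -> 0.004   (4 km = 0.004 Mm)
    # length_conversion(1, "meter", "km") -> 0.001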
| 97
|
import numpy as np
from PIL import Image
def maxpooling(arr: list, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: list, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
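    # Demo (my addition): pooling on a small explicit matrix instead of an image file.
    # maxpooling with size=2, stride=2 keeps the max of each 2x2 block:
    # demo = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    # maxpooling(demo, size=2, stride=2) -> [[ 6.,  8.], [14., 16.]]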
| 314
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ : str = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[str] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowerCAmelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98
|
from __future__ import annotations
def UpperCAmelCase_ ( _A , _A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = word_bank or []
# create a table
SCREAMING_SNAKE_CASE__ = len(_A ) + 1
SCREAMING_SNAKE_CASE__ = []
for _ in range(_A ):
table.append([] )
# seed value
SCREAMING_SNAKE_CASE__ = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(_A ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(_A )] == word:
SCREAMING_SNAKE_CASE__ = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
                # now, push that combination to table[i + len(word)]
table[i + len(_A )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(_A )]:
combination.reverse()
return table[len(_A )]
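# For example, all_construct("abcdef", ["ab", "abc", "cd", "def", "abcd", "ef", "c"])
# yields the four decompositions [["abc", "def"], ["ab", "cd", "ef"],
# ["ab", "c", "def"], ["abcd", "ef"]] (order may vary). Note that the number of
# decompositions can grow exponentially with the length of the target.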
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 314
| 0
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def A_ ( A__ , A__ ) -> np.array:
a__ : Optional[Any] = F'{sampling_rate}'
a__ : Optional[int] = '1'
a__ : Any = 'f32le'
a__ : Dict = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(A__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
a__ : List[str] = ffmpeg_process.communicate(A__ )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
a__ : Union[str, Any] = output_stream[0]
a__ : Any = np.frombuffer(A__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
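# Usage sketch (the file name is hypothetical and the pre-renaming function name
# is assumed to be ffmpeg_read):
#     with open("sample.flac", "rb") as f:
#         audio = ffmpeg_read(f.read(), 16000)  # mono float32 PCM at 16 kHz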
def A_ ( A__ , A__ , A__ = "f32le" , ) -> int:
a__ : Optional[int] = F'{sampling_rate}'
a__ : Optional[Any] = '1'
if format_for_conversion == "s16le":
a__ : List[str] = 2
elif format_for_conversion == "f32le":
a__ : int = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
a__ : List[Any] = platform.system()
if system == "Linux":
a__ : Any = 'alsa'
a__ : Optional[Any] = 'default'
elif system == "Darwin":
a__ : Union[str, Any] = 'avfoundation'
a__ : Any = ':0'
elif system == "Windows":
a__ : Union[str, Any] = 'dshow'
a__ : Optional[int] = 'default'
a__ : Dict = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
a__ : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
a__ : str = _ffmpeg_stream(A__ , A__ )
for item in iterator:
yield item
def A_ ( A__ , A__ , A__ = None , A__ = None , A__ = "f32le" , ) -> Union[str, Any]:
if stream_chunk_s is not None:
a__ : Union[str, Any] = stream_chunk_s
else:
a__ : Dict = chunk_length_s
a__ : Tuple = ffmpeg_microphone(A__ , A__ , format_for_conversion=A__ )
if format_for_conversion == "s16le":
a__ : str = np.intaa
a__ : Optional[int] = 2
elif format_for_conversion == "f32le":
a__ : Tuple = np.floataa
a__ : Optional[Any] = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
if stride_length_s is None:
a__ : str = chunk_length_s / 6
a__ : int = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(A__ , (int, float) ):
a__ : Any = [stride_length_s, stride_length_s]
a__ : Optional[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
a__ : str = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
a__ : List[str] = datetime.datetime.now()
a__ : Tuple = datetime.timedelta(seconds=A__ )
for item in chunk_bytes_iter(A__ , A__ , stride=(stride_left, stride_right) , stream=A__ ):
# Put everything back in numpy scale
a__ : str = np.frombuffer(item['raw'] , dtype=A__ )
a__ : List[str] = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
a__ : Optional[int] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def A_ ( A__ , A__ , A__ , A__ = False ) -> Tuple:
a__ : Optional[Any] = B''
a__ , a__ : List[str] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' )
a__ : Optional[Any] = 0
for raw in iterator:
acc += raw
if stream and len(A__ ) < chunk_len:
a__ : Any = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(A__ ) >= chunk_len:
# We are flushing the accumulator
a__ : int = (_stride_left, stride_right)
a__ : Any = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
a__ : int = False
yield item
a__ : Union[str, Any] = stride_left
a__ : int = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(A__ ) > stride_left:
a__ : str = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
a__ : List[str] = False
yield item
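# Chunking sketch: with chunk_len=6 and stride=(2, 1), a 10-byte stream
# b"0123456789" is emitted as b"012345" (stride (0, 1)), b"345678" (stride (2, 1))
# and a final partial chunk b"6789" (stride (2, 0)); consecutive chunks overlap by
# stride_left + stride_right bytes so consumers can discard the striding edges.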
def A_ ( A__ , A__ ) -> Tuple:
a__ : Union[str, Any] = 2**24 # 16Mo
try:
with subprocess.Popen(A__ , stdout=subprocess.PIPE , bufsize=A__ ) as ffmpeg_process:
while True:
a__ : str = ffmpeg_process.stdout.read(A__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
| 99
|
import requests
from bs4 import BeautifulSoup
def UpperCAmelCase_ ( _A = "AAPL" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
SCREAMING_SNAKE_CASE__ = BeautifulSoup(requests.get(_A ).text , '''html.parser''' )
SCREAMING_SNAKE_CASE__ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
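    # Note: this CSS class targets Yahoo Finance's current markup; it is fragile
    # and the scraper will stop finding the quote if the page layout changes.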
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 314
| 0
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ):
"""simple docstring"""
__lowercase : Optional[Any] = XLNetTokenizer
__lowercase : List[str] = XLNetTokenizerFast
__lowercase : List[Any] = True
__lowercase : int = True
def snake_case_ ( self):
super().setUp()
# We have a SentencePiece fixture for testing
__SCREAMING_SNAKE_CASE = XLNetTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = """<s>"""
__SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__) , lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__) , lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<unk>""")
self.assertEqual(vocab_keys[1] , """<s>""")
self.assertEqual(vocab_keys[-1] , """<eod>""")
self.assertEqual(len(lowerCAmelCase__) , 1_0_0_6)
def snake_case_ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XLNetTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""")
self.assertListEqual(lowerCAmelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2])
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4])
__SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XLNetTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""▁he""", """ll""", """o"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XLNetTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XLNetTokenizer.from_pretrained("""xlnet-base-cased""")
__SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def snake_case_ ( self):
# fmt: off
__SCREAMING_SNAKE_CASE = {"""input_ids""": [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 100
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = (UnCLIPScheduler,)
def lowercase_ ( self : List[str] , **__lowerCamelCase : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = {
'''num_train_timesteps''': 1000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**__lowerCamelCase )
return config
def lowercase_ ( self : Dict ) -> Any:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def lowercase_ ( self : str ) -> Union[str, Any]:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__lowerCamelCase )
def lowercase_ ( self : List[str] ) -> int:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCamelCase )
def lowercase_ ( self : Optional[Any] ) -> Tuple:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> Dict:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def lowercase_ ( self : int ) -> str:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__lowerCamelCase , prev_timestep=__lowerCamelCase )
def lowercase_ ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(variance_type='''fixed_small_log''' )
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1e-5
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(variance_type='''learned_range''' )
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = 0.5
assert scheduler._get_variance(1 , predicted_variance=__lowerCamelCase ) - -10.1712790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=__lowerCamelCase ) - -5.7998052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=__lowerCamelCase ) - -0.0010011 < 1e-5
def lowercase_ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = scheduler.timesteps
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
for i, t in enumerate(__lowerCamelCase ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , __lowerCamelCase )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE__ = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample
SCREAMING_SNAKE_CASE__ = pred_prev_sample
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 252.2682495 ) < 1e-2
assert abs(result_mean.item() - 0.3284743 ) < 1e-3
def lowercase_ ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(25 )
SCREAMING_SNAKE_CASE__ = scheduler.timesteps
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
for i, t in enumerate(__lowerCamelCase ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , __lowerCamelCase )
if i + 1 == timesteps.shape[0]:
SCREAMING_SNAKE_CASE__ = None
else:
SCREAMING_SNAKE_CASE__ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE__ = scheduler.step(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , prev_timestep=__lowerCamelCase , generator=__lowerCamelCase ).prev_sample
SCREAMING_SNAKE_CASE__ = pred_prev_sample
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 258.2044983 ) < 1e-2
assert abs(result_mean.item() - 0.3362038 ) < 1e-3
def lowercase_ ( self : int ) -> Tuple:
pass
def lowercase_ ( self : Dict ) -> Union[str, Any]:
pass
| 314
| 0
|
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
return int((input_a, input_a).count(0 ) == 0 )
def UpperCamelCase ( ):
'''simple docstring'''
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 101
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def UpperCAmelCase_ ( ):
'''simple docstring'''
raise RuntimeError('''CUDA out of memory.''' )
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any ) -> int:
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Linear(3 , 4 )
SCREAMING_SNAKE_CASE__ = nn.BatchNormad(4 )
SCREAMING_SNAKE_CASE__ = nn.Linear(4 , 5 )
def lowercase_ ( self : int , __lowerCamelCase : Optional[int] ) -> Tuple:
return self.lineara(self.batchnorm(self.lineara(__lowerCamelCase ) ) )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__lowerCamelCase : Optional[int] ):
nonlocal batch_sizes
batch_sizes.append(__lowerCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__lowerCamelCase , [128, 64, 32, 16, 8] )
def lowercase_ ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ):
nonlocal batch_sizes
batch_sizes.append(__lowerCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = mock_training_loop_function('''hello''' )
self.assertListEqual(__lowerCamelCase , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def lowercase_ ( self : str ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__lowerCamelCase : Optional[Any] ):
pass
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def lowercase_ ( self : Union[str, Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__lowerCamelCase : Dict ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def lowercase_ ( self : List[Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function(128 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def lowercase_ ( self : Union[str, Any] ) -> int:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__lowerCamelCase : Tuple ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def lowercase_ ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ = torch.cuda.memory_allocated()
SCREAMING_SNAKE_CASE__ = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = release_memory(__lowerCamelCase )
self.assertEqual(torch.cuda.memory_allocated() , __lowerCamelCase )
| 314
| 0
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 102
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : List[str] , **__lowerCamelCase : Dict ) -> List[str]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Any , **__lowerCamelCase : List[str] ) -> Any:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Optional[int] , **__lowerCamelCase : int ) -> Dict:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Dict ) -> Dict:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def lowercase_ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def lowercase_ ( self : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 314
| 0
|
from __future__ import annotations
def UpperCamelCase( __UpperCamelCase : str ):
    return [ord(elem ) - 96 for elem in __UpperCamelCase]
def UpperCamelCase( __UpperCamelCase : list[int] ):
return "".join(chr(elem + 96 ) for elem in encoded )
def UpperCamelCase( ):
lowerCAmelCase_ : Dict = encode(input('''-> ''' ).strip().lower() )
    print('''Encoded: ''' ,encoded )
    print('''Decoded:''' ,decode(encoded ) )
if __name__ == "__main__":
main()
| 103
|
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314
| 0
|
'''simple docstring'''
def _A ( A__ = 1000 ):
"""simple docstring"""
__lowercase , __lowercase = 1, 1
__lowercase = []
for i in range(1 , n + 1 ):
__lowercase = prev_numerator + 2 * prev_denominator
__lowercase = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
__lowercase = numerator
__lowercase = denominator
    return len(result )
if __name__ == "__main__":
print(f'{solution() = }')
| 104
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_SCREAMING_SNAKE_CASE : Optional[int] = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_SCREAMING_SNAKE_CASE : Any = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_A )[0]
@deprecated(_A , '''Please use tf.data to implement this functionality.''' )
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_A ) as bytestream:
SCREAMING_SNAKE_CASE__ = _readaa(_A )
if magic != 20_51:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = bytestream.read(rows * cols * num_images )
SCREAMING_SNAKE_CASE__ = numpy.frombuffer(_A , dtype=numpy.uinta )
SCREAMING_SNAKE_CASE__ = data.reshape(_A , _A , _A , 1 )
return data
@deprecated(_A , '''Please use tf.one_hot on tensors.''' )
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
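    # Intended behavior (sketch): _dense_to_one_hot(numpy.array([1, 3]), 5) yields
    # [[0, 1, 0, 0, 0],
    #  [0, 0, 0, 1, 0]]; row r carries a 1 at column labels_dense[r].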
SCREAMING_SNAKE_CASE__ = labels_dense.shape[0]
SCREAMING_SNAKE_CASE__ = numpy.arange(_A ) * num_classes
SCREAMING_SNAKE_CASE__ = numpy.zeros((num_labels, num_classes) )
SCREAMING_SNAKE_CASE__ = 1
return labels_one_hot
@deprecated(_A , '''Please use tf.data to implement this functionality.''' )
def UpperCAmelCase_ ( _A , _A=False , _A=10 ):
'''simple docstring'''
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_A ) as bytestream:
SCREAMING_SNAKE_CASE__ = _readaa(_A )
if magic != 20_49:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = bytestream.read(_A )
SCREAMING_SNAKE_CASE__ = numpy.frombuffer(_A , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_A , _A )
return labels
class UpperCAmelCase__ :
"""simple docstring"""
@deprecated(
__lowerCamelCase , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict=False , __lowerCamelCase : Dict=False , __lowerCamelCase : List[str]=dtypes.floataa , __lowerCamelCase : List[str]=True , __lowerCamelCase : Any=None , ) -> List[Any]:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = random_seed.get_seed(__lowerCamelCase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
SCREAMING_SNAKE_CASE__ = dtypes.as_dtype(__lowerCamelCase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
SCREAMING_SNAKE_CASE__ = 1_0000
SCREAMING_SNAKE_CASE__ = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
SCREAMING_SNAKE_CASE__ = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
SCREAMING_SNAKE_CASE__ = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
SCREAMING_SNAKE_CASE__ = images.astype(numpy.floataa )
SCREAMING_SNAKE_CASE__ = numpy.multiply(__lowerCamelCase , 1.0 / 255.0 )
SCREAMING_SNAKE_CASE__ = images
SCREAMING_SNAKE_CASE__ = labels
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
@property
def lowercase_ ( self : Tuple ) -> List[str]:
return self._images
@property
def lowercase_ ( self : List[Any] ) -> Tuple:
return self._labels
@property
def lowercase_ ( self : Tuple ) -> Tuple:
return self._num_examples
@property
def lowercase_ ( self : Optional[int] ) -> int:
return self._epochs_completed
def lowercase_ ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=True ) -> str:
if fake_data:
SCREAMING_SNAKE_CASE__ = [1] * 784
SCREAMING_SNAKE_CASE__ = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__lowerCamelCase )],
[fake_label for _ in range(__lowerCamelCase )],
)
SCREAMING_SNAKE_CASE__ = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
SCREAMING_SNAKE_CASE__ = numpy.arange(self._num_examples )
numpy.random.shuffle(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.images[perma]
SCREAMING_SNAKE_CASE__ = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
SCREAMING_SNAKE_CASE__ = self._num_examples - start
SCREAMING_SNAKE_CASE__ = self._images[start : self._num_examples]
SCREAMING_SNAKE_CASE__ = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
SCREAMING_SNAKE_CASE__ = numpy.arange(self._num_examples )
numpy.random.shuffle(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.images[perm]
SCREAMING_SNAKE_CASE__ = self.labels[perm]
# Start next epoch
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = batch_size - rest_num_examples
SCREAMING_SNAKE_CASE__ = self._index_in_epoch
SCREAMING_SNAKE_CASE__ = self._images[start:end]
SCREAMING_SNAKE_CASE__ = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
SCREAMING_SNAKE_CASE__ = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(_A , '''Please write your own downloading logic.''' )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
if not gfile.Exists(_A ):
gfile.MakeDirs(_A )
SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A )
if not gfile.Exists(_A ):
urllib.request.urlretrieve(_A , _A ) # noqa: S310
with gfile.GFile(_A ) as f:
SCREAMING_SNAKE_CASE__ = f.size()
print('''Successfully downloaded''' , _A , _A , '''bytes.''' )
return filepath
@deprecated(
_A , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def UpperCAmelCase_ ( _A , _A=False , _A=False , _A=dtypes.floataa , _A=True , _A=50_00 , _A=None , _A=DEFAULT_SOURCE_URL , ):
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=_A , one_hot=_A , dtype=_A , seed=_A )
SCREAMING_SNAKE_CASE__ = fake()
SCREAMING_SNAKE_CASE__ = fake()
SCREAMING_SNAKE_CASE__ = fake()
return _Datasets(train=_A , validation=_A , test=_A )
if not source_url: # empty string check
SCREAMING_SNAKE_CASE__ = DEFAULT_SOURCE_URL
SCREAMING_SNAKE_CASE__ = '''train-images-idx3-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = '''train-labels-idx1-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = '''t10k-images-idx3-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = '''t10k-labels-idx1-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + train_images_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_images(_A )
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + train_labels_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_labels(_A , one_hot=_A )
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + test_images_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_images(_A )
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + test_labels_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_labels(_A , one_hot=_A )
if not 0 <= validation_size <= len(_A ):
SCREAMING_SNAKE_CASE__ = (
'''Validation size should be between 0 and '''
F'''{len(_A )}. Received: {validation_size}.'''
)
raise ValueError(_A )
SCREAMING_SNAKE_CASE__ = train_images[:validation_size]
SCREAMING_SNAKE_CASE__ = train_labels[:validation_size]
SCREAMING_SNAKE_CASE__ = train_images[validation_size:]
SCREAMING_SNAKE_CASE__ = train_labels[validation_size:]
SCREAMING_SNAKE_CASE__ = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
SCREAMING_SNAKE_CASE__ = _DataSet(_A , _A , **_A )
SCREAMING_SNAKE_CASE__ = _DataSet(_A , _A , **_A )
SCREAMING_SNAKE_CASE__ = _DataSet(_A , _A , **_A )
return _Datasets(train=_A , validation=_A , test=_A )
| 314
| 0
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : float ) ->float:
'''simple docstring'''
return 10 - x * x
def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float ) ->float:
'''simple docstring'''
if equation(_lowercase ) * equation(_lowercase ) >= 0:
raise ValueError("Wrong space!" )
a : List[Any] = a
while (b - a) >= 0.01:
# Find middle point
a : Dict = (a + b) / 2
# Check if middle point is root
if equation(_lowercase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_lowercase ) * equation(_lowercase ) < 0:
a : Optional[Any] = c
else:
a : List[str] = c
return c
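# With equation(x) = 10 - x * x the roots are +/- sqrt(10) ~= 3.162, so both
# calls below should print a value within the 0.01 tolerance of 3.162.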
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 105
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
_SCREAMING_SNAKE_CASE : str = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
    return max(metric_fn(_A , gt ) for gt in ground_truths )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE__ = pd.read_csv(_A , sep='''\t''' , header=_A )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE__ = ast.literal_eval(_A )
answers.append(_A )
else:
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = [[reference] for reference in references]
SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = 0
for prediction, ground_truths in zip(_A , _A ):
total += 1
em += metric_max_over_ground_truths(_A , _A , _A )
fa += metric_max_over_ground_truths(_A , _A , _A )
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * em / total
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * fa / total
logger.info(F'''F1: {fa:.2f}''' )
logger.info(F'''EM: {em:.2f}''' )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = args.k
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = 0
for hypo, reference in zip(_A , _A ):
SCREAMING_SNAKE_CASE__ = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE__ = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * em / total
logger.info(F'''Precision@{k}: {em: .2f}''' )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
def strip_title(_A ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE__ = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE__ = title[:-1]
return title
SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_A , return_tensors='''pt''' , padding=_A , truncation=_A , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE__ = rag_model.rag.question_encoder(_A )
SCREAMING_SNAKE_CASE__ = question_enc_outputs[0]
SCREAMING_SNAKE_CASE__ = rag_model.retriever(
_A , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE__ = []
for docs in all_docs:
SCREAMING_SNAKE_CASE__ = [strip_title(_A ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(_A ) )
return provenance_strings
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_A , return_tensors='''pt''' , padding=_A , truncation=_A )
SCREAMING_SNAKE_CASE__ = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE__ = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE__ = rag_model.generate( # rag_model overwrites generate
_A , attention_mask=_A , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_A , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE__ = rag_model.retriever.generator_tokenizer.batch_decode(_A , skip_special_tokens=_A )
if args.print_predictions:
for q, a in zip(_A , _A ):
logger.info('''Q: {} - A: {}'''.format(_A , _A ) )
return answers
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=_A , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=_A , choices=['''exact''', '''compressed''', '''legacy'''] , type=_A , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=_A , type=_A , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=_A , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=_A , type=_A , required=_A , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=_A , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=_A , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=_A , type=_A , required=_A , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=_A , type=_A , required=_A , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=_A , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file'''
'''qa - a single line in the following format: question [tab] answer_list'''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=_A , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=_A , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=_A , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=_A , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=_A , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE__ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE__ = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE__ = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE__ = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE__ = args.index_path
else:
SCREAMING_SNAKE_CASE__ = BartForConditionalGeneration
SCREAMING_SNAKE_CASE__ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , _A )
SCREAMING_SNAKE_CASE__ = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE__ = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(_A , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(_A ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE__ = RagRetriever.from_pretrained(_A , **_A )
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_A , retriever=_A , **_A )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_A , **_A )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE__ = []
for line in tqdm(_A ):
questions.append(line.strip() )
if len(_A ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_A , _A , _A )
preds_file.write('''\n'''.join(_A ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE__ = []
if len(_A ) > 0:
SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_A , _A , _A )
preds_file.write('''\n'''.join(_A ) )
preds_file.flush()
score_fn(_A , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : int = get_args()
main(args)
| 314
| 0
|
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Optional[int] ,lowercase_ : int | None = None ):
lowerCAmelCase__ : List[str] = value
        lowerCAmelCase__ : Node | None = None # Added in order to make deleting a node easier
lowerCAmelCase__ : Node | None = None
lowerCAmelCase__ : Node | None = None
def __repr__( self : List[str] ):
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'{self.value}': (self.left, self.right)} ,indent=1 )
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : int ,lowercase_ : Node | None = None ):
lowerCAmelCase__ : Union[str, Any] = root
def __str__( self : Union[str, Any] ):
return str(self.root )
def __lowerCAmelCase ( self : Dict ,lowercase_ : Node ,lowercase_ : Node | None ):
if new_children is not None: # reset its kids
lowerCAmelCase__ : List[str] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowercase_ ): # If it is the right children
lowerCAmelCase__ : Union[str, Any] = new_children
else:
lowerCAmelCase__ : Optional[Any] = new_children
else:
lowerCAmelCase__ : List[str] = new_children
def __lowerCAmelCase ( self : List[Any] ,lowercase_ : Node ):
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __lowerCAmelCase ( self : List[str] ):
return self.root is None
def __lowerCAmelCase ( self : Tuple ,lowercase_ : Any ):
lowerCAmelCase__ : Union[str, Any] = Node(lowercase_ ) # create a new Node
if self.empty(): # if Tree is empty
lowerCAmelCase__ : Dict = new_node # set its root
else: # Tree is not empty
lowerCAmelCase__ : Union[str, Any] = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowerCAmelCase__ : int = new_node # We insert the new node in a leaf
break
else:
lowerCAmelCase__ : Any = parent_node.left
else:
if parent_node.right is None:
lowerCAmelCase__ : Any = new_node
break
else:
lowerCAmelCase__ : List[str] = parent_node.right
lowerCAmelCase__ : Dict = parent_node
def __lowerCAmelCase ( self : str ,*lowercase_ : Dict ):
for value in values:
self.__insert(lowercase_ )
def __lowerCAmelCase ( self : Union[str, Any] ,lowercase_ : Any ):
if self.empty():
raise IndexError('''Warning: Tree is empty! Please insert a value first.''' )
else:
lowerCAmelCase__ : str = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
lowerCAmelCase__ : Tuple = node.left if value < node.value else node.right
return node
def __lowerCAmelCase ( self : int ,lowercase_ : Node | None = None ):
if node is None:
if self.root is None:
return None
lowerCAmelCase__ : str = self.root
if not self.empty():
while node.right is not None:
lowerCAmelCase__ : List[Any] = node.right
return node
def __lowerCAmelCase ( self : Any ,lowercase_ : Node | None = None ):
if node is None:
lowerCAmelCase__ : Dict = self.root
if self.root is None:
return None
if not self.empty():
lowerCAmelCase__ : str = self.root
while node.left is not None:
lowerCAmelCase__ : int = node.left
return node
def __lowerCAmelCase ( self : Dict ,lowercase_ : int ):
lowerCAmelCase__ : Tuple = self.search(lowercase_ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowercase_ ,lowercase_ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowercase_ ,node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowercase_ ,node.left )
else:
lowerCAmelCase__ : Dict = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowerCAmelCase__ : List[str] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : Node | None ):
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def __lowerCAmelCase ( self : str ,lowercase_ : List[str]=None ):
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def __lowerCAmelCase ( self : Dict ,lowercase_ : list ,lowercase_ : Node | None ):
if node:
self.inorder(lowercase_ ,node.left )
arr.append(node.value )
self.inorder(lowercase_ ,node.right )
def __lowerCAmelCase ( self : Tuple ,lowercase_ : int ,lowercase_ : Node ):
lowerCAmelCase__ : list[int] = []
self.inorder(lowercase_ ,lowercase_ ) # append all values to list using inorder traversal
return arr[k - 1]
def __SCREAMING_SNAKE_CASE ( A_ ):
lowerCAmelCase__ : Union[str, Any] = []
if curr_node is not None:
lowerCAmelCase__ : int = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : str = (8, 3, 6, 1, 10, 14, 13, 4, 7)
lowerCAmelCase__ : Optional[int] = BinarySearchTree()
for i in testlist:
t.insert(A_ )
# Prints all the elements of the list in order traversal
print(A_ )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' , t.get_max().value ) # type: ignore
print('''Min Value: ''' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(A_ )
print(A_ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
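# A minimal usage sketch for the tree above (appended note, not part of the
# original file). In this obfuscated listing every method was renamed to
# __lowerCAmelCase, so the call names below (insert, search, remove, and the
# k-th smallest helper) are the evident originals, not names that exist
# verbatim in the listing:
#
#   t = BinarySearchTree()
#   t.insert(8, 3, 10)               # the *values variant shown above
#   assert t.search(3) is not None
#   # the last method runs an in-order traversal (sorted order for a BST)
#   # and returns arr[k - 1], i.e. the k-th smallest value:
#   #   t.kth_smallest(2, t.root)  ->  8  (for the values 3, 8, 10)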
| 106
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=7 , __lowerCamelCase : Any=3 , __lowerCamelCase : Any=30 , __lowerCamelCase : str=400 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[int]=[0.5, 0.5, 0.5] , __lowerCamelCase : Tuple=[0.5, 0.5, 0.5] , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=1 / 255 , __lowerCamelCase : Dict=True , ) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = min_resolution
SCREAMING_SNAKE_CASE__ = max_resolution
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean
SCREAMING_SNAKE_CASE__ = image_std
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor
SCREAMING_SNAKE_CASE__ = do_pad
def lowercase_ ( self : Tuple ) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=False ) -> Optional[int]:
if not batched:
SCREAMING_SNAKE_CASE__ = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image.size
else:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE__ = int(self.size['''shortest_edge'''] * h / w )
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
elif w > h:
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE__ = int(self.size['''shortest_edge'''] * w / h )
else:
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
else:
SCREAMING_SNAKE_CASE__ = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = YolosImageProcessor if is_vision_available() else None
def lowercase_ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = YolosImageProcessingTester(self )
@property
def lowercase_ ( self : Tuple ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
def lowercase_ ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCamelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def lowercase_ ( self : Tuple ) -> Optional[int]:
pass
def lowercase_ ( self : int ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Tuple ) -> str:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Dict ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : List[str] ) -> Optional[Any]:
# Initialize image_processings
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE__ = self.image_processing_class(do_resize=__lowerCamelCase , do_normalize=__lowerCamelCase , do_rescale=__lowerCamelCase )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
SCREAMING_SNAKE_CASE__ = image_processing_a.pad(__lowerCamelCase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ = image_processing_a(__lowerCamelCase , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def lowercase_ ( self : Union[str, Any] ) -> Optional[int]:
# prepare image and target
SCREAMING_SNAKE_CASE__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
SCREAMING_SNAKE_CASE__ = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
SCREAMING_SNAKE_CASE__ = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCamelCase ) )
# verify orig_size
SCREAMING_SNAKE_CASE__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCamelCase ) )
@slow
def lowercase_ ( self : Optional[Any] ) -> Optional[Any]:
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
SCREAMING_SNAKE_CASE__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
SCREAMING_SNAKE_CASE__ = YolosImageProcessor(format='''coco_panoptic''' )
SCREAMING_SNAKE_CASE__ = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCamelCase ) )
# verify masks
SCREAMING_SNAKE_CASE__ = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCamelCase )
# verify orig_size
SCREAMING_SNAKE_CASE__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCamelCase ) )
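# A standalone usage sketch for YolosImageProcessor (appended, not part of the
# original test file; assumes network access to the Hub -- the checkpoint name
# and the expected output shape are taken from the slow test above):
if __name__ == "__main__":
    from PIL import Image
    from transformers import YolosImageProcessor

    processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    encoding = processor(images=image, return_tensors="pt")
    print(encoding["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066])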
| 314
| 0
|
import re
import string
import numpy as np
import datasets
__lowerCAmelCase : Dict = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase : Union[str, Any] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowerCAmelCase : List[Any] = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : int=False , ) -> Dict:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
a = np.array([re.sub(__lowerCamelCase , "" , __lowerCamelCase ) for x in predictions] )
a = np.array([re.sub(__lowerCamelCase , "" , __lowerCamelCase ) for x in references] )
else:
a = np.asarray(__lowerCamelCase )
a = np.asarray(__lowerCamelCase )
if ignore_case:
a = np.char.lower(__lowerCamelCase )
a = np.char.lower(__lowerCamelCase )
if ignore_punctuation:
a = string.punctuation.maketrans("" , "" , string.punctuation )
a = np.char.translate(__lowerCamelCase , table=__lowerCamelCase )
a = np.char.translate(__lowerCamelCase , table=__lowerCamelCase )
if ignore_numbers:
a = string.digits.maketrans("" , "" , string.digits )
a = np.char.translate(__lowerCamelCase , table=__lowerCamelCase )
a = np.char.translate(__lowerCamelCase , table=__lowerCamelCase )
a = predictions == references
return {"exact_match": np.mean(__lowerCamelCase ) * 1_00}
| 107
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "roberta-prelayernorm"
def __init__( self : Optional[Any] , __lowerCamelCase : List[Any]=5_0265 , __lowerCamelCase : str=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : str=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Dict=512 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : List[Any]=1e-12 , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : Any=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : List[str]="absolute" , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Dict=None , **__lowerCamelCase : Optional[int] , ) -> Optional[Any]:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = position_embedding_type
SCREAMING_SNAKE_CASE__ = use_cache
SCREAMING_SNAKE_CASE__ = classifier_dropout
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
@property
def lowercase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
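# A minimal usage sketch for the config class above (appended note, not part
# of the original file; the checkpoint name comes from the archive map at the
# top of the file and requires network access):
#
#   from transformers import AutoConfig
#   config = AutoConfig.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
#   print(config.model_type)  # "roberta-prelayernorm"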
| 314
| 0
|
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
lowerCAmelCase__ = pytest.mark.integration
lowerCAmelCase__ = {'''comet'''}
lowerCAmelCase__ = importlib.util.find_spec('''fairseq''') is not None
lowerCAmelCase__ = {'''code_eval'''}
lowerCAmelCase__ = os.name == '''nt'''
lowerCAmelCase__ = {'''bertscore''', '''frugalscore''', '''perplexity'''}
lowerCAmelCase__ = importlib.util.find_spec('''transformers''') is not None
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
@wraps(SCREAMING_SNAKE_CASE )
def wrapper(self : Any , SCREAMING_SNAKE_CASE : Optional[Any] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self , SCREAMING_SNAKE_CASE )
return wrapper
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
@wraps(SCREAMING_SNAKE_CASE )
def wrapper(self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self , SCREAMING_SNAKE_CASE )
return wrapper
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
@wraps(SCREAMING_SNAKE_CASE )
def wrapper(self : Tuple , SCREAMING_SNAKE_CASE : str ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self , SCREAMING_SNAKE_CASE )
return wrapper
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
lowercase , lowercase , lowercase )
@local
class SCREAMING_SNAKE_CASE__ ( parameterized.TestCase ):
"""simple docstring"""
a : int ={}
a : List[str] =None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = "[...]"
lowerCAmelCase : str = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , snake_case__ ) ).module_path )
lowerCAmelCase : List[Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=snake_case__ )
# check parameters
lowerCAmelCase : Optional[Any] = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(snake_case__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
lowerCAmelCase : Optional[Any] = doctest.testmod(snake_case__ , verbose=snake_case__ , raise_on_error=snake_case__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = "[...]"
lowerCAmelCase : Dict = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , snake_case__ ) ).module_path )
# run doctest
with self.use_local_metrics():
lowerCAmelCase : List[Any] = doctest.testmod(snake_case__ , verbose=snake_case__ , raise_on_error=snake_case__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](snake_case__ ):
yield
else:
yield
@contextmanager
def lowercase__ ( self ):
"""simple docstring"""
def load_local_metric(snake_case__ , *snake_case__ , **snake_case__ ):
return load_metric(os.path.join("metrics" , snake_case__ ) , *snake_case__ , **snake_case__ )
with patch("datasets.load_metric" ) as mock_load_metric:
lowerCAmelCase : Dict = load_local_metric
yield
@classmethod
def lowercase__ ( cls , snake_case__ ):
"""simple docstring"""
def wrapper(snake_case__ ):
lowerCAmelCase : Dict = contextmanager(snake_case__ )
lowerCAmelCase : List[str] = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
import tensorflow.compat.v1 as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
lowerCAmelCase : Optional[Any] = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
import torch
def bert_cos_score_idf(SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Optional[int] ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(SCREAMING_SNAKE_CASE ) )
# mock get_model, which would otherwise download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
lowerCAmelCase : Tuple = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
def load_from_checkpoint(SCREAMING_SNAKE_CASE : Optional[Any] ):
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def lowercase__ ( self , snake_case__ , *snake_case__ , **snake_case__ ):
"""simple docstring"""
assert len(snake_case__ ) == 2
lowerCAmelCase : Union[str, Any] = [0.19, 0.92]
return scores, sum(snake_case__ ) / len(snake_case__ )
return Model()
# mock download_model and load_from_checkpoint, which would otherwise download a comet model
with patch("comet.download_model" ) as mock_download_model:
lowerCAmelCase : Tuple = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
lowerCAmelCase : List[Any] = load_from_checkpoint
yield
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : int = load_metric(os.path.join("metrics" , "seqeval" ) )
lowerCAmelCase : List[Any] = "ERROR"
lowerCAmelCase : Optional[int] = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(SCREAMING_SNAKE_CASE , match=re.escape(SCREAMING_SNAKE_CASE ) ):
metric.compute(predictions=[] , references=[] , scheme=SCREAMING_SNAKE_CASE )
| 108
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "lxmert"
a = {}
def __init__( self : Union[str, Any] , __lowerCamelCase : List[str]=3_0522 , __lowerCamelCase : Union[str, Any]=768 , __lowerCamelCase : Dict=12 , __lowerCamelCase : Union[str, Any]=9500 , __lowerCamelCase : Union[str, Any]=1600 , __lowerCamelCase : Any=400 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=512 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : Any=1e-12 , __lowerCamelCase : List[Any]=9 , __lowerCamelCase : Any=5 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Optional[Any]=2048 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : List[str]=6.67 , __lowerCamelCase : Dict=True , __lowerCamelCase : Any=True , __lowerCamelCase : Any=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Any=True , **__lowerCamelCase : Optional[Any] , ) -> Any:
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = num_qa_labels
SCREAMING_SNAKE_CASE__ = num_object_labels
SCREAMING_SNAKE_CASE__ = num_attr_labels
SCREAMING_SNAKE_CASE__ = l_layers
SCREAMING_SNAKE_CASE__ = x_layers
SCREAMING_SNAKE_CASE__ = r_layers
SCREAMING_SNAKE_CASE__ = visual_feat_dim
SCREAMING_SNAKE_CASE__ = visual_pos_dim
SCREAMING_SNAKE_CASE__ = visual_loss_normalizer
SCREAMING_SNAKE_CASE__ = task_matched
SCREAMING_SNAKE_CASE__ = task_mask_lm
SCREAMING_SNAKE_CASE__ = task_obj_predict
SCREAMING_SNAKE_CASE__ = task_qa
SCREAMING_SNAKE_CASE__ = visual_obj_loss
SCREAMING_SNAKE_CASE__ = visual_attr_loss
SCREAMING_SNAKE_CASE__ = visual_feat_loss
SCREAMING_SNAKE_CASE__ = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**__lowerCamelCase )
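# A minimal usage sketch for the config above (appended note, not part of the
# original file; the checkpoint name comes from the pretrained map at the top
# and requires network access):
#
#   from transformers import LxmertConfig
#   config = LxmertConfig.from_pretrained("unc-nlp/lxmert-base-uncased")
#   print(config.num_attention_heads, config.num_qa_labels)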
| 314
| 0
|
"""simple docstring"""
def reverse_long_words( sentence : str ) -> str:
    """
    Reverse each word that is longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 109
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : str = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
_SCREAMING_SNAKE_CASE : Dict = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
_SCREAMING_SNAKE_CASE : Optional[int] = {
'''vinai/phobert-base''': 256,
'''vinai/phobert-large''': 256,
}
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = set()
SCREAMING_SNAKE_CASE__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ = char
SCREAMING_SNAKE_CASE__ = set(_A )
return pairs
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : List[str]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , **__lowerCamelCase : Optional[int] , ) -> Union[str, Any]:
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = vocab_file
SCREAMING_SNAKE_CASE__ = merges_file
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 3
self.add_from_file(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.encoder.items()}
with open(__lowerCamelCase , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE__ = merges_handle.read().split('''\n''' )[:-1]
SCREAMING_SNAKE_CASE__ = [tuple(merge.split()[:-1] ) for merge in merges]
SCREAMING_SNAKE_CASE__ = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE__ = {}
def lowercase_ ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase_ ( self : Dict ) -> str:
return len(self.encoder )
def lowercase_ ( self : List[Any] ) -> str:
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ ( self : Any , __lowerCamelCase : Any ) -> Any:
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE__ = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
SCREAMING_SNAKE_CASE__ = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE__ = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = bigram
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
while i < len(__lowerCamelCase ):
try:
SCREAMING_SNAKE_CASE__ = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE__ = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE__ = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = new_word
if len(__lowerCamelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE__ = get_pairs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''@@ '''.join(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = word[:-4]
SCREAMING_SNAKE_CASE__ = word
return word
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = re.findall(r'''\S+\n?''' , __lowerCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(__lowerCamelCase ).split(''' ''' ) ) )
return split_tokens
def lowercase_ ( self : str , __lowerCamelCase : Optional[int] ) -> Optional[int]:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[str] ) -> Dict:
return self.decoder.get(__lowerCamelCase , self.unk_token )
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = ''' '''.join(__lowerCamelCase ).replace('''@@ ''' , '''''' ).strip()
return out_string
def lowercase_ ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE__ = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
if os.path.abspath(self.merges_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.merges_file , __lowerCamelCase )
return out_vocab_file, out_merge_file
def lowercase_ ( self : int , __lowerCamelCase : Tuple ) -> Optional[Any]:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
try:
with open(__lowerCamelCase , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(__lowerCamelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
return
SCREAMING_SNAKE_CASE__ = f.readlines()
for lineTmp in lines:
SCREAMING_SNAKE_CASE__ = lineTmp.strip()
SCREAMING_SNAKE_CASE__ = line.rfind(''' ''' )
if idx == -1:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
SCREAMING_SNAKE_CASE__ = line[:idx]
SCREAMING_SNAKE_CASE__ = len(self.encoder )
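# A minimal usage sketch for the PhoBERT tokenizer above (appended note, not
# part of the original file; assumes network access -- the checkpoint name
# comes from the pretrained maps at the top of the file):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base")
#   enc = tokenizer("Xin chào")   # Vietnamese for "hello"
#   print(enc["input_ids"])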
| 314
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_UpperCamelCase : List[str] = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 220
|
from functools import reduce
_SCREAMING_SNAKE_CASE : Any = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution( n = _SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
    print(F"{solution() = }")
| 314
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_ (A__ ):
"""simple docstring"""
def __init__( self : Tuple ,*lowercase__ : Optional[int] ,**lowercase__ : Any ):
warnings.warn(
'''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PoolFormerImageProcessor instead.''' ,__lowerCamelCase ,)
super().__init__(*__lowerCamelCase ,**__lowerCamelCase )
| 104
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Tuple , __lowerCamelCase : Dict ) -> str:
super().__init__()
# make sure scheduler can always be converted to DDIM
SCREAMING_SNAKE_CASE__ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase )
@torch.no_grad()
def __call__( self : List[Any] , __lowerCamelCase : int = 1 , __lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase : float = 0.0 , __lowerCamelCase : int = 50 , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , __lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
SCREAMING_SNAKE_CASE__ = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(__lowerCamelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
SCREAMING_SNAKE_CASE__ = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
SCREAMING_SNAKE_CASE__ = self.unet(__lowerCamelCase , __lowerCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
SCREAMING_SNAKE_CASE__ = self.scheduler.step(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , eta=__lowerCamelCase , use_clipped_model_output=__lowerCamelCase , generator=__lowerCamelCase ).prev_sample
SCREAMING_SNAKE_CASE__ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase )
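# A minimal usage sketch for the DDIM pipeline above (appended note, not part
# of the original file; assumes the diffusers package and network access --
# the checkpoint name is illustrative, any unconditional DDPM/DDIM-compatible
# checkpoint works; the parameter names match the __call__ signature above):
#
#   from diffusers import DDIMPipeline
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#   image.save("ddim_sample.png")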
| 314
| 0
|
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class A_ ( A__ ):
@require_torch
def lowercase ( self : Optional[Any] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_UpperCAmelCase = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
_UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
_UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn\'t access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
_UpperCAmelCase = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(__lowerCamelCase )
BertModel.from_pretrained(__lowerCamelCase )
BertTokenizer.from_pretrained(__lowerCamelCase )
pipeline(task="fill-mask" , model=__lowerCamelCase )
# baseline - just load from_pretrained with normal network
_UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
_UpperCAmelCase = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_UpperCAmelCase = "1"
_UpperCAmelCase = subprocess.run(__lowerCamelCase , env=__lowerCamelCase , check=__lowerCamelCase , capture_output=__lowerCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def lowercase ( self : Tuple ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_UpperCAmelCase = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
_UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
_UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
_UpperCAmelCase = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(__lowerCamelCase )
BertModel.from_pretrained(__lowerCamelCase )
BertTokenizer.from_pretrained(__lowerCamelCase )
pipeline(task="fill-mask" , model=__lowerCamelCase )
# baseline - just load from_pretrained with normal network
_UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
_UpperCAmelCase = self.get_env()
_UpperCAmelCase = subprocess.run(__lowerCamelCase , env=__lowerCamelCase , check=__lowerCamelCase , capture_output=__lowerCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def lowercase ( self : int ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_UpperCAmelCase = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
_UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
_UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
_UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
_UpperCAmelCase = self.get_env()
_UpperCAmelCase = subprocess.run(__lowerCamelCase , env=__lowerCamelCase , check=__lowerCamelCase , capture_output=__lowerCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# next emulate no network
_UpperCAmelCase = [sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_UpperCAmelCase = "1"
_UpperCAmelCase = subprocess.run(__lowerCamelCase , env=__lowerCamelCase , check=__lowerCamelCase , capture_output=__lowerCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def lowercase ( self : str ):
_UpperCAmelCase = "\nfrom transformers import pipeline\n "
_UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
_UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
_UpperCAmelCase = self.get_env()
_UpperCAmelCase = "1"
_UpperCAmelCase = [sys.executable, "-c", "\n".join([load, mock, run] )]
_UpperCAmelCase = subprocess.run(__lowerCamelCase , env=__lowerCamelCase , check=__lowerCamelCase , capture_output=__lowerCamelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
@require_torch
def lowercase ( self : List[Any] ):
_UpperCAmelCase = "\nfrom transformers import AutoModel\n "
_UpperCAmelCase = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
_UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
_UpperCAmelCase = self.get_env()
_UpperCAmelCase = subprocess.run(__lowerCamelCase , env=__lowerCamelCase , check=__lowerCamelCase , capture_output=__lowerCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_UpperCAmelCase = "1"
_UpperCAmelCase = subprocess.run(__lowerCamelCase , env=__lowerCamelCase , check=__lowerCamelCase , capture_output=__lowerCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
| 22
|
from ...configuration_utils import PretrainedConfig
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = "tapas"
    def __init__( self : int , vocab_size : int=3_0522 , hidden_size : int=768 , num_hidden_layers : int=12 , num_attention_heads : int=12 , intermediate_size : int=3072 , hidden_act : str="gelu" , hidden_dropout_prob : float=0.1 , attention_probs_dropout_prob : float=0.1 , max_position_embeddings : int=1024 , type_vocab_sizes : list=[3, 256, 256, 2, 256, 256, 10] , initializer_range : float=0.02 , layer_norm_eps : float=1e-12 , pad_token_id : int=0 , positive_label_weight : float=10.0 , num_aggregation_labels : int=0 , aggregation_loss_weight : float=1.0 , use_answer_as_supervision : bool=None , answer_loss_importance : float=1.0 , use_normalized_answer_loss : bool=False , huber_loss_delta : float=None , temperature : float=1.0 , aggregation_temperature : float=1.0 , use_gumbel_for_cells : bool=False , use_gumbel_for_aggregation : bool=False , average_approximation_function : str="ratio" , cell_selection_preference : float=None , answer_loss_cutoff : float=None , max_num_rows : int=64 , max_num_columns : int=32 , average_logits_per_cell : bool=False , select_one_column : bool=True , allow_empty_column_selection : bool=False , init_cell_selection_weights_to_zero : bool=False , reset_position_index_per_cell : bool=True , disable_per_token_loss : bool=False , aggregation_labels : dict=None , no_aggregation_label_index : int=None , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
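        # Illustrative note (assumption: this mirrors how configs round-trip
        # through JSON, where dict keys always arrive as strings):
        #   aggregation_labels = {"0": "NONE", "1": "SUM"}
        #   -> stored as        {0: "NONE", 1: "SUM"}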
| 314
| 0
|
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def replace_keys( state_dict ):
    model_state_dict = {}
    state_dict.pop('''pixel_mean''' , None )
    state_dict.pop('''pixel_std''' , None )
    output_hypernetworks_mlps_pattern = r'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'''
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(output_hypernetworks_mlps_pattern , key ):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern , key ).group(2 ) )
            if layer_nb == 0:
                key = key.replace('''layers.0''' , '''proj_in''' )
            elif layer_nb == 1:
                key = key.replace('''layers.1''' , '''layers.0''' )
            elif layer_nb == 2:
                key = key.replace('''layers.2''' , '''proj_out''' )
        model_state_dict[key] = value
    model_state_dict['''shared_image_embedding.positional_embedding'''] = model_state_dict[
        '''prompt_encoder.shared_embedding.positional_embedding'''
    ]
    return model_state_dict
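# Quick, illustrative sanity check of the hypernetwork-MLP renaming rule above
# (the key below is made up but follows the original checkpoint's naming):
#   key = "mask_decoder.output_hypernetworks_mlps.0.layers.2.weight"
#   re.match(r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*", key).group(2) == "2"
#   -> the "layers.2" segment is rewritten to "proj_out"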
def convert_sam_checkpoint( model_name , pytorch_dump_folder , push_to_hub , model_hub_id="ybelkada/segment-anything" ):
    checkpoint_path = hf_hub_download(model_hub_id , F'''checkpoints/{model_name}.pth''' )
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
        config = SamConfig(
            vision_config=vision_config , )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
        config = SamConfig(
            vision_config=vision_config , )
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )
    state_dict = replace_keys(state_dict )
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor )
    hf_model = SamModel(config )
    hf_model.load_state_dict(state_dict )
    hf_model = hf_model.to('''cuda''' )
    img_url = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'''
    raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('''RGB''' )
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image ) , return_tensors='''pt''' ).to('''cuda''' )
    with torch.no_grad():
        output = hf_model(**inputs )
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668
        inputs = processor(
            images=np.array(raw_image ) , input_points=input_points , input_labels=input_labels , return_tensors='''pt''' ).to('''cuda''' )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604
        input_boxes = ((75, 275, 1_725, 850),)
        inputs = processor(images=np.array(raw_image ) , input_boxes=input_boxes , return_tensors='''pt''' ).to('''cuda''' )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514
        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image ) , input_points=input_points , input_labels=input_labels , return_tensors='''pt''' ).to('''cuda''' )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195''']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 332
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = 0
@slow
def lowercase_ ( self : List[str] ) -> Any:
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            self.assertIsNotNone(tokenizer )
            self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(tokenizer ) , 0 )
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            self.assertIsNotNone(tokenizer )
            self.assertIsInstance(tokenizer , (GPTaTokenizer, GPTaTokenizerFast) )
            self.assertGreater(len(tokenizer ) , 0 )
    def lowercase_ ( self : List[str] ) -> int:
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
    def lowercase_ ( self : List[str] ) -> Dict:
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER )
        self.assertIsInstance(tokenizer , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 20 )
    def lowercase_ ( self : Dict ) -> Any:
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(config , RobertaConfig )
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , config=config )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : Tuple ) -> Dict:
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(tmp_dir , '''vocab.txt''' ) )
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''bert''' , use_fast=False )
            self.assertIsInstance(tokenizer , BertTokenizer )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(tmp_dir , '''vocab.json''' ) )
            shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(tmp_dir , '''merges.txt''' ) )
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''gpt2''' , use_fast=False )
            self.assertIsInstance(tokenizer , GPTaTokenizer )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Optional[int]:
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(tmp_dir , '''vocab.txt''' ) )
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''bert''' )
            self.assertIsInstance(tokenizer , BertTokenizerFast )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(tmp_dir , '''vocab.json''' ) )
            shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(tmp_dir , '''merges.txt''' ) )
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''gpt2''' )
            self.assertIsInstance(tokenizer , GPTaTokenizerFast )
def lowercase_ ( self : Union[str, Any] ) -> int:
        with pytest.raises(ValueError ):
            AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Tuple:
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
            self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
            if isinstance(tokenizer , BertTokenizer ):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , False )
            else:
                self.assertEqual(tokenizer.do_lower_case , False )
            self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowercase_ ( self : Any ) -> str:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
                tokenizer = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowercase_ ( self : List[str] ) -> Tuple:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__lowerCamelCase )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Any:
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=False ) , BertTokenizer )
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , BertTokenizerFast )
@require_tokenizers
def lowercase_ ( self : List[Any] ) -> Optional[Any]:
        tokenizer = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=False )
        sample = '''Hello, world. How are you?'''
        tokens = tokenizer.tokenize(sample )
        self.assertEqual('''[UNK]''' , tokens[0] )
        tokenizer = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=False )
        tokens = tokenizer.tokenize(sample )
        self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowercase_ ( self : Dict ) -> int:
        tokenizer = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
        self.assertEqual(type(tokenizer ) , PreTrainedTokenizerFast )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
    def lowercase_ ( self : List[Any] ) -> Optional[int]:
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir )
            tokenizera = AutoTokenizer.from_pretrained(tmp_dir )
        self.assertIsInstance(tokenizera , tokenizer.__class__ )
        self.assertEqual(tokenizera.vocab_size , 12 )
def lowercase_ ( self : Optional[int] ) -> Any:
        tokenizer = AutoTokenizer.from_pretrained('''ctrl''' )
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer , CTRLTokenizer )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config('''bert-base-cased''' )
        _ = config.pop('''_commit_hash''' , None )
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config , {'''do_lower_case''': False} )
        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER )
        self.assertDictEqual(config , {} )
        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir )
            config = get_tokenizer_config(tmp_dir )
        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowercase_ ( self : int ) -> str:
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = CustomTokenizer.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase_ ( self : List[Any] ) -> List[Any]:
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
# Can register in two steps
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
            # We pass through a bert tokenizer fast because there is no converter from slow to fast for our new tokenizer
            # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = BertTokenizerFast.from_pretrained(__lowerCamelCase )
bert_tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = CustomTokenizerFast.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Dict ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowercase_ ( self : List[str] ) -> str:
        class NewTokenizer( BertTokenizer ):
            """simple docstring"""
            special_attribute_present = False
        class NewTokenizerFast( BertTokenizerFast ):
            """simple docstring"""
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
    def lowercase_ ( self : Union[str, Any] ) -> Dict:
        with self.assertRaisesRegex(
            EnvironmentError , '''bert-base is not a local folder and is not a valid model identifier''' ):
            tokenizer = AutoTokenizer.from_pretrained('''bert-base''' )
    def lowercase_ ( self : Dict ) -> Optional[int]:
        with self.assertRaisesRegex(
            EnvironmentError , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='''aaaaaa''' )
def lowercase_ ( self : Any ) -> Optional[Any]:
# Make sure we have cached the tokenizer.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
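    # Illustrative recap of the caching behaviour exercised above (values depend
    # on the hosted files; this is a comment sketch, not part of the original):
    #   get_tokenizer_config("bert-base-cased") -> {"do_lower_case": False, ...}
    #   a second `from_pretrained` on an already-cached model only issues a
    #   single HEAD request (to check for updates) and no GET requests.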
| 314
| 0
|
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    """transformers""",
    os.path.join(PATH_TO_TRANSFORMERS, """__init__.py"""),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    '''CLIPConfigMixin''',
    '''DecisionTransformerConfigMixin''',
    '''EncoderDecoderConfigMixin''',
    '''RagConfigMixin''',
    '''SpeechEncoderDecoderConfigMixin''',
    '''VisionEncoderDecoderConfigMixin''',
    '''VisionTextDualEncoderConfigMixin''',
}
def check_config_docstrings_have_checkpoints() -> None:
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class )
        checkpoints = _re_checkpoint.findall(config_source )
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint ) )
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
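# Illustrative: what `_re_checkpoint` matches (the docstring fragment is made up):
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]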
| 265
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : str ) -> Dict:
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
def lowercase_ ( self : Dict , **__lowerCamelCase : Dict ) -> Union[str, Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Optional[Any] , **__lowerCamelCase : Dict ) -> int:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : str ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : List[Any] ) -> Tuple:
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def lowercase_ ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(__lowerCamelCase ):
processor()
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
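    # Minimal usage sketch for the processor under test (variable names are my
    # own assumptions, not part of the original file):
    #   processor = VisionTextDualEncoderProcessor(tokenizer=tok, image_processor=im_proc)
    #   inputs = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
    #   -> dict with input_ids / token_type_ids / attention_mask / pixel_values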
| 314
| 0
|
'''simple docstring'''
def is_even(number : int ) -> bool:
    return number & 1 == 0
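# Illustrative examples for the bitwise parity check above (my own, not from
# the original; `doctest.testmod()` below only runs docstring examples):
#   is_even(4) -> True   (4 & 1 == 0)
#   is_even(7) -> False  (7 & 1 == 1)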
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41
|
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class UpperCAmelCase__ ( PretrainedConfig ):
    """simple docstring"""
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"
    def __init__( self : Optional[Any] , vocab_size : int=2_1128 , hidden_size : int=768 , num_hidden_layers : int=12 , num_attention_heads : int=12 , intermediate_size : int=3072 , hidden_act : str="gelu" , hidden_dropout_prob : float=0.1 , attention_probs_dropout_prob : float=0.1 , max_position_embeddings : int=512 , max_relative_position : int=64 , type_vocab_size : int=2 , initializer_range : float=0.02 , layer_norm_eps : float=1e-12 , classifier_dropout : float=0.1 , pad_token_id : int=0 , bos_token_id : int=2 , eos_token_id : int=3 , use_cache : bool=True , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 314
| 0
|
def solution( limit : int = 50_000_000 ) -> int:
    """Count the numbers below `limit` expressible as p**2 + q**3 + r**4 with p, q, r prime."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2) )
    primes = set(range(3 , prime_square_limit + 1 , 2 ) )
    primes.add(2 )
    for p in range(3 , prime_square_limit + 1 , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
    # Iterate the primes in ascending order so the early `break`s below are sound.
    ordered_primes = sorted(primes )
    for prime_a in ordered_primes:
        square = prime_a * prime_a
        for prime_b in ordered_primes:
            cube = prime_b * prime_b * prime_b
            if square + cube >= limit - 16:
                break
            for prime_c in ordered_primes:
                tetr = prime_c * prime_c * prime_c * prime_c
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total )
    return len(ret )
if __name__ == "__main__":
print(F"{solution() = }")
| 340
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class UpperCAmelCase__ ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__( self : Tuple , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def __getstate__( self : Optional[Any] ) -> Any:
        state = self.__dict__.copy()
        state['''_tokenizer'''].pre_tokenizer = BertPreTokenizer()
        return state
    def __setstate__( self : int , d : Any ) -> None:
        self.__dict__ = d
        vocab = self.__dict__['''_tokenizer'''].get_vocab()
        self.__dict__['''_tokenizer'''].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
    def build_inputs_with_special_tokens( self : int , token_ids_a : List[int] , token_ids_b : Optional[List[int]]=None ) -> List[int]:
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self : List[str] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self : List[str] , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def save_pretrained( self : str , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
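    # Illustrative layout produced by the two sequence-pair helpers above
    # (BERT-style; the second row shows the token_type_ids):
    #   tokens:          [CLS] A1 A2 [SEP] B1 B2 [SEP]
    #   token_type_ids:    0   0  0    0   1  1    1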
| 314
| 0
|
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto( a , b ):
    '''Compare two TensorProtos for equality while ignoring their names.'''
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with( node_proto , name , new_name ):
    '''Replace every input called `name` on this node (and its subgraphs) with `new_name`.'''
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with( graph_proto , name , new_name ):
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model( model , model_without_ext , ind_to_replace ):
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers( onnx_file_path ):
    '''Load an ONNX model, drop duplicate initializers and save the optimized copy.'''
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / double
                else:
                    print("""unexpected data type: """ , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print("""total reduced size: """ , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , """GB""" )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    new_model_file_name = """optimized_""" + model_file_name
    new_model_path = os.path.join(model_file_folder , new_model_file_name )
    onnx.save(model , new_model_path )
    return new_model_path
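# Hypothetical usage of the deduplication entry point above (the path is made up):
#   optimized_path = remove_dup_initializers("exported/model.onnx")
#   print("optimized model written to", optimized_path)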
| 115
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCAmelCase__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self : Tuple , vocab_size : int=100 , action_weight : int=5 , reward_weight : int=1 , value_weight : int=1 , block_size : int=249 , action_dim : int=6 , observation_dim : int=17 , transition_dim : int=25 , n_layer : int=4 , n_head : int=4 , n_embd : int=128 , embd_pdrop : float=0.1 , attn_pdrop : float=0.1 , resid_pdrop : float=0.1 , learning_rate : float=0.0006 , max_position_embeddings : int=512 , initializer_range : float=0.02 , layer_norm_eps : float=1e-12 , kaiming_initializer_range : int=1 , use_cache : bool=True , pad_token_id : int=1 , bos_token_id : int=5_0256 , eos_token_id : int=5_0256 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 314
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ['image_embeds', 'negative_image_embeds', 'hint']
    batch_params = ['image_embeds', 'negative_image_embeds', 'hint']
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
@property
def SCREAMING_SNAKE_CASE_( self ) -> str:
return 32
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
return 32
@property
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE_( self ) -> int:
return 100
@property
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
torch.manual_seed(0 )
lowerCamelCase_ = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCamelCase_ = UNetaDConditionModel(**__lowerCamelCase )
return model
@property
def SCREAMING_SNAKE_CASE_( self ) -> int:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
torch.manual_seed(0 )
lowerCamelCase_ = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = self.dummy_unet
lowerCamelCase_ = self.dummy_movq
lowerCamelCase_ = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__lowerCamelCase , )
lowerCamelCase_ = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=0 ) -> Tuple:
lowerCamelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
lowerCamelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowerCamelCase )
# create hint
lowerCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
if str(__lowerCamelCase ).startswith("mps" ):
lowerCamelCase_ = torch.manual_seed(__lowerCamelCase )
else:
lowerCamelCase_ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
lowerCamelCase_ = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = "cpu"
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = self.pipeline_class(**__lowerCamelCase )
lowerCamelCase_ = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase_ = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
lowerCamelCase_ = output.images
lowerCamelCase_ = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array(
[0.6_9_5_9_8_2_6, 0.8_6_8_2_7_9, 0.7_5_5_8_0_9_2, 0.6_8_7_6_9_4_6_7, 0.8_5_8_0_5_8_0_4, 0.6_5_9_7_7_4_9_6, 0.4_4_8_8_5_3_0_2, 0.5_9_5_9_1_1_1, 0.4_2_5_1_5_9_5] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        # convert the depth hint to a (1, 3, H, W) float tensor in [0, 1]
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
def solution(limit: int = 1_00_00_00) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, i.e. the number of
    reduced proper fractions with denominator <= limit (Project Euler 72)."""
    # sieve of Eratosthenes over the odd numbers
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # phi[n] starts at n and is multiplied by (1 - 1/p) for each prime factor p
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
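# Quick check with a small limit, worked by hand:
# phi(2), ..., phi(8) = 1, 2, 2, 4, 2, 6, 4, which sums to 21, so solution(8) == 21.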
if __name__ == "__main__":
print(F"{solution() = }")
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
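# A minimal sketch of calling this directly (hypothetical paths; any seq2seq checkpoint works):
# generate_summaries_or_translations(
#     ["PG&E stated it scheduled the blackouts in response to forecasts for high winds."],
#     "out.txt",
#     "sshleifer/distilbart-xsum-12-6",
#     batch_size=1,
# )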
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    """Parse CLI args, run generation, and (optionally) score the results."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
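    # Usage for summarization (hypothetical paths; all flags come from the parser above):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_generations.txt \
    #     --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization --bs 8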
run_generate(verbose=True)
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a size x size window over a square matrix in steps of `stride`,
    keeping the maximum of each window (max pooling)."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
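# A quick sanity check, worked by hand: each 2x2 block contributes its maximum.
# >>> arr = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
# >>> maxpooling(arr, size=2, stride=2)
# array([[ 6.,  8.],
#        [14., 16.]])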
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a size x size window over a square matrix in steps of `stride`,
    keeping the (truncated) average of each window (average pooling)."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
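# The same input under average pooling (np.average truncated via int(), so 3.5 -> 3):
# >>> avgpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
# array([[ 3.,  5.],
#        [11., 13.]])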
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)
    # Loading the image
    image = Image.open("path_to_image")  # replace with a real image path
    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
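# Note: `to_kwargs()` (inherited from KwargsHandler) returns only the fields whose values
# differ from the dataclass defaults, e.g. MockClass(a=2).to_kwargs() == {"a": 2}.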
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
lowercase__ : List[str] = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
lowercase__ : str = Accelerator(kwargs_handlers=[ddp_scaler])
lowercase__ : List[Any] = torch.nn.Linear(1_0_0, 2_0_0)
lowercase__ : Dict = accelerator.prepare(model)
# Check the values changed in kwargs
lowercase__ : Dict = ''''''
lowercase__ : str = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every way `target` can be built by concatenating words from
    `word_bank` (each word may be reused)."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
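# For example (worked by hand):
# all_construct("purple", ["purp", "p", "ur", "le", "purpl"])
# -> [['purp', 'le'], ['p', 'ur', 'p', 'le']]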
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)