| code (string, 86-54.5k chars) | code_codestyle (int64, 0-371) | style_context (string, 87-49.2k chars) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__(self):
        return len(self.src_lens)
    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]
    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]) -> List:
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))
def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config | 225 |
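A quick worked check of the SQuAD-style scoring helpers above (a minimal sketch; the values are easy to verify by hand):
# normalize_answer drops case, punctuation and articles, so the two sides reduce to
# ["cat", "sat"] vs. ["cat", "sat", "down"]: precision = 1.0, recall = 2/3, F1 = 0.8
assert round(f1_score("The cat sat.", "cat sat down"), 6) == 0.8
assert exact_match_score("The Cat!", "the cat") is True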
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of variable-length string symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)
    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")
        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return (vocab_file, merges_file) | 225 | 1 |
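A small illustration of the BPE helper above (a sketch; real merge ranks come from `merges.txt`):
# the adjacent symbol pairs of a word are the candidates for the next BPE merge
assert get_pairs("low") == {("l", "o"), ("o", "w")}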
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
| 139 |
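A sanity check of the letter-scoring rule above, using the example from the problem statement (COLIN, the 938th name, scores 938 x 53 = 49714):
assert sum(ord(letter) - 64 for letter in "COLIN") == 53  # 3 + 15 + 12 + 9 + 14
assert 938 * 53 == 49714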
def triangle_number_generator():
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2
def count_divisors(n):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
| 139 | 1 |
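A quick check of the divisor-counting step above: 28 = 2**2 * 7 has (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28) and, per the problem statement, is the first triangle number with more than five:
assert count_divisors(28) == 6
assert next(t for t in triangle_number_generator() if count_divisors(t) > 5) == 28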
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))
        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))
    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16_000)
        agent_type = AgentAudio(path)
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))
        self.assertIsInstance(agent_type.to_raw(), Image.Image)
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)
        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 161 |
'''simple docstring'''
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin the denoising loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,), "This is a local test"
        return ImagePipelineOutput(images=image), "This is a local test"
| 161 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)
def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
| 264 |
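Tracing the recursion above on a two-element input shows the exclude-then-include order in which subsequences are printed:
generate_all_subsequences(["A", "B"])
# prints:
# []
# ['B']
# ['A']
# ['A', 'B']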
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])
    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])
    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)
    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)
    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def snake_case_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264 | 1 |
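A minimal usage sketch for the class above (the graph and its weights are made up for illustration):
g = Graph(4)
g.add_edge(0, 1, 1)
g.add_edge(1, 2, 2)
g.add_edge(2, 3, 3)
g.add_edge(0, 3, 4)
g.boruvka()  # selects edges 0-1, 1-2 and 2-3 for a total weight of 6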
def triangle_number_generator():
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2
def count_divisors(n):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
| 43 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan
    inputs = ["text"]
    outputs = ["audio"]
    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()
    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)
    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
| 20 | 0 |
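A minimal usage sketch, assuming the checkpoints named above can be downloaded (like other PipelineTool subclasses, the instance is callable and runs setup, encode, forward and decode in turn):
reader = TextToSpeechTool()
speech = reader("This is a test.")  # returns a waveform tensor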
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 28 |
from math import ceil
def solution(n: int = 1001) -> int:
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
        n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
| 28 | 1 |
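The problem statement gives a handy check for the function above: the diagonals of a 5 x 5 number spiral sum to 101.
assert solution(5) == 101  # 1 + 3 + 5 + 7 + 9 + 13 + 17 + 21 + 25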
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c
def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)
def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )
def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print('''Example of four vector: ''')
print(f'ct\' = {four_vector[0]}')
print(f'x\' = {four_vector[1]}')
print(f'y\' = {four_vector[2]}')
print(f'z\' = {four_vector[3]}')
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'\n{numerical_vector}')
| 229 |
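A quick numeric check of the helpers above at half the speed of light:
assert beta(c / 2) == 0.5
print(gamma(c / 2))  # 1.1547005383792517, i.e. 1 / sqrt(1 - 0.5**2)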
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)
def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)
    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim
    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
snake_case__ = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
snake_case__ = [
torch.load(os.path.join(_A , f'''consolidated.{i:02d}.pth''' ) , map_location='cpu' )
for i in range(_A )
]
snake_case__ = 0
snake_case__ = {'weight_map': {}}
for layer_i in range(_A ):
snake_case__ = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
snake_case__ = {
f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wq.weight'''] ),
f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wk.weight'''] ),
f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''],
f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''],
f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''],
f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''],
f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''],
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''],
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
snake_case__ = {
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.attention_norm.weight'''
].clone(),
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
snake_case__ = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) )
snake_case__ = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , )
snake_case__ = torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A )
snake_case__ = torch.cat(
[loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(_A )] , dim=1 )
snake_case__ = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(_A )] , dim=0 )
snake_case__ = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(_A )] , dim=1 )
snake_case__ = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(_A )] , dim=0 )
snake_case__ = inv_freq
for k, v in state_dict.items():
snake_case__ = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
snake_case__ = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
snake_case__ = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
snake_case__ = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ),
}
for k, v in state_dict.items():
snake_case__ = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
# Write configs
snake_case__ = {'total_size': param_count * 2}
write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) )
snake_case__ = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
snake_case__ = params['multiple_of'] if 'multiple_of' in params else 256
snake_case__ = LlamaConfig(
hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , )
config.save_pretrained(_A )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
snake_case__ = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(_A , safe_serialization=_A )
shutil.rmtree(_A )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir", help="Location of LLaMA weights, which contains tokenizer.model and model folders"
    )
    parser.add_argument(
        "--model_size", choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"]
    )
    parser.add_argument(
        "--output_dir", help="Location to write HF model and tokenizer"
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
    main()
| 307 | 0 |
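As a quick sanity check, `compute_intermediate_size` reproduces the 7B entry of the intermediate-size table above (LLaMA-7B uses dim = 4096):
# int(8 * 4096 / 3) = 10922; rounding up to a multiple of 256 gives 11008
assert compute_intermediate_size(4096) == INTERMEDIATE_SIZE_MAP["7B"]  # 11008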
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any]):
assert isinstance(_lowerCamelCase , _lowerCamelCase)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict):
lowercase__ : Optional[int] = tmp_path / "cache"
lowercase__ : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ : Optional[int] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any]):
lowercase__ : List[str] = tmp_path / "cache"
lowercase__ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : List[Any] = features.copy() if features else default_expected_features
lowercase__ : List[Any] = (
Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
lowercase__ : List[Any] = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize(
"features" , [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] , )
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : List[str] , _lowerCamelCase : int):
lowercase__ : Optional[int] = tmp_path / "cache"
lowercase__ : Optional[Any] = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
lowercase__ : Union[str, Any] = features.copy() if features else default_expected_features
lowercase__ : List[str] = (
Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
lowercase__ : List[Any] = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any]):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
lowercase__ : int = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
lowercase__ : Optional[Any] = features.copy()
lowercase__ : Optional[int] = (
Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
lowercase__ : Tuple = tmp_path / "cache"
lowercase__ : Dict = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Optional[int] , _lowerCamelCase : int):
lowercase__ : Tuple = tmp_path / "cache"
lowercase__ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : Optional[Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list])
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : str):
if issubclass(_lowerCamelCase , _lowerCamelCase):
lowercase__ : List[str] = jsonl_path
elif issubclass(_lowerCamelCase , _lowerCamelCase):
lowercase__ : Dict = [jsonl_path]
lowercase__ : Tuple = tmp_path / "cache"
lowercase__ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : Tuple = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple=("train",)):
assert isinstance(_lowerCamelCase , _lowerCamelCase)
for split in splits:
lowercase__ : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : List[str]):
lowercase__ : Dict = tmp_path / "cache"
lowercase__ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ : Optional[Any] = JsonDatasetReader({"train": jsonl_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]):
lowercase__ : Dict = tmp_path / "cache"
lowercase__ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : List[Any] = features.copy() if features else default_expected_features
lowercase__ : Tuple = (
Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
lowercase__ : List[str] = JsonDatasetReader({"train": jsonl_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : Any):
if split:
lowercase__ : Union[str, Any] = {split: jsonl_path}
else:
lowercase__ : Union[str, Any] = "train"
lowercase__ : List[str] = {"train": jsonl_path, "test": jsonl_path}
lowercase__ : Any = tmp_path / "cache"
lowercase__ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : Any = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def lowercase_ ( _lowerCamelCase : Optional[int]):
return json.load(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : str):
return [json.loads(_lowerCamelCase) for line in buffer]
class snake_case_ :
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def __UpperCamelCase ( self : Any , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : List[Any] ) -> Dict:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ ).write()
buffer.seek(0 )
lowercase__ : Union[str, Any] = load_json_function(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
assert isinstance(exported_content[0] , lowercase_ )
assert len(lowercase_ ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Tuple ) -> List[str]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ ).write()
buffer.seek(0 )
lowercase__ : str = load_json(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowercase_ ) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def __UpperCamelCase ( self : str , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Any ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , num_proc=2 ).write()
buffer.seek(0 )
lowercase__ : Tuple = load_json_function(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
assert isinstance(exported_content[0] , lowercase_ )
assert len(lowercase_ ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def __UpperCamelCase ( self : Dict , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : str , lowercase_ : List[str] , lowercase_ : Optional[int] ) -> Optional[int]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ , num_proc=2 ).write()
buffer.seek(0 )
lowercase__ : Any = load_json(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowercase_ ) == 10
def __UpperCamelCase ( self : Dict , lowercase_ : Optional[int] ) -> List[Any]:
with pytest.raises(lowercase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def __UpperCamelCase ( self : List[str] , lowercase_ : int , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Optional[int] ) -> int:
lowercase__ : Tuple = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}'''
lowercase__ : List[Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(lowercase_ , lowercase_ , compression=lowercase_ ).write()
with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f:
lowercase__ : Optional[Any] = f.read()
with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f:
lowercase__ : Any = f.read()
assert exported_content == original_content
 | 333 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : str ) -> List[str]:
lowercase__ : str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowercase__ : List[Any] = self.diffusers_dir
shutil.copy(
os.path.join(lowercase_ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple=None ) -> Tuple:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
lowercase__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
lowercase__ : List[str] = black.format_str(lowercase_ , mode=lowercase_ )
lowercase__ : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowercase_ , "w" , newline="\n" ) as f:
f.write(lowercase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase_ )
with open(lowercase_ , "r" ) as f:
self.assertTrue(f.read() , lowercase_ )
def __UpperCamelCase ( self : str ) -> Optional[int]:
lowercase__ : Optional[Any] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : int ) -> str:
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowercase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowercase_ ) , )
# Copy consistency with a really long name
lowercase__ : Optional[int] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("Bert" , lowercase_ , lowercase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowercase_ , overwrite_result=re.sub("DDPM" , "Test" , lowercase_ ) , )
| 333 | 1 |
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 139 |
'''simple docstring'''
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
print(f'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 139 | 1 |
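Two quick examples for the checker above:
assert is_isogram("Uncopyrightable") is True  # no letter repeats
assert is_isogram("allowance") is False  # "l" appears twice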
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
ACCEPTABLE_CHECKPOINTS = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    """Load a pickled original checkpoint onto the CPU."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Rename original checkpoint keys to match the transformers VisualBERT layout."""
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint weights into the transformers design."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"
    config = VisualBertConfig(**config_params)
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 351 | """simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")
SCREAMING_SNAKE_CASE__ = "\n{0} = None\n"
SCREAMING_SNAKE_CASE__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
SCREAMING_SNAKE_CASE__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def test_find_backend(self):
        no_backend = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
        self.assertIsNone(no_backend)
        simple_backend = find_backend(' if not is_tokenizers_available():' )
        self.assertEqual(simple_backend , 'tokenizers' )
        backend_with_underscore = find_backend(' if not is_tensorflow_text_available():' )
        self.assertEqual(backend_with_underscore , 'tensorflow_text' )
        double_backend = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
        self.assertEqual(double_backend , 'sentencepiece_and_tokenizers' )
        double_backend_with_underscore = find_backend(
            ' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
        self.assertEqual(double_backend_with_underscore , 'sentencepiece_and_tensorflow_text' )
        triple_backend = find_backend(
            ' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
        self.assertEqual(triple_backend , 'sentencepiece_and_tokenizers_and_vision' )
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch' , objects )
        self.assertIn('tensorflow_text' , objects )
        self.assertIn('sentencepiece_and_tokenizers' , objects )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('BertModel' , objects['torch'] )
        self.assertIn('TFBertModel' , objects['tf'] )
        self.assertIn('FlaxBertModel' , objects['flax'] )
        self.assertIn('BertModel' , objects['torch'] )
        self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
        self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('CONSTANT' , '\'torch\'' )
        self.assertEqual(dummy_constant , '\nCONSTANT = None\n' )
        dummy_function = create_dummy_object('function' , '\'torch\'' )
        self.assertEqual(
            dummy_function , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
        expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
        dummy_class = create_dummy_object('FakeClass' , '\'torch\'' )
        self.assertEqual(dummy_class , expected_dummy_class )
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
        self.assertEqual(dummy_files['torch'] , expected_dummy_pytorch_file )
| 149 | 0 |
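The dummy objects generated above rely on a small metaclass trick; here is a stripped-down, hypothetical version (the real `DummyObject` and `requires_backends` live in `transformers.utils`) showing why the class can be imported freely but raises the moment it is used.

class DummyObject(type):
    # Attribute access on the *class* triggers the backend check, so importing
    # the dummy costs nothing and only actually using it fails.
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        raise ImportError(f"{cls.__name__} requires the {cls._backends} backend(s) to be installed.")

class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

# FakeClass itself imports fine; FakeClass.from_pretrained would raise ImportError.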
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    open_issues = repo.get_issues(state='open')
    for issue in open_issues:
        # Most recent comment first.
        comments = sorted(issue.get_comments() , key=lambda i: i.created_at , reverse=True )
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 264 |
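A cautious dry-run variant of the loop above can be useful before letting a bot mutate issues; this sketch uses the same PyGithub calls and thresholds as the script, and everything else (function name, output format) is an assumption.

import os
from datetime import datetime as dt
from github import Github

def list_stale_candidates():
    repo = Github(os.environ["GITHUB_TOKEN"]).get_repo("huggingface/diffusers")
    for issue in repo.get_issues(state="open"):
        if (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30:
            print(issue.number, issue.title)  # would receive the 'stale' label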
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict['input_ids'] = inputs_dict['labels']
                inputs_dict['token_type_ids'] = inputs_dict['labels']
                inputs_dict['mc_token_ids'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict['mc_labels'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)
    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)
    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OpenAIGPTModelLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=torch_device )  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 264 | 1 |
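The integration test above pins the greedy-decoding output for the original GPT checkpoint; a hedged standalone sketch of the same call path (checkpoint name taken from the test, everything else standard transformers usage):

from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
input_ids = tokenizer("the president is", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False)  # greedy, deterministic
print(tokenizer.decode(output_ids[0]))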
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about the running environment."""
    ua = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f'; torch/{_torch_version}'
    if is_flax_available():
        ua += f'; jax/{_jax_version}'
        ua += f'; flax/{_flax_version}'
    if is_onnx_available():
        ua += f'; onnxruntime/{_onnxruntime_version}'
    # CI will set this value to True
    if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    """Resolve `namespace/model_id` for the current user or the given organization."""
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['name']
        return f'{username}/{model_id}'
    else:
        return f'{organization}/{model_id}'
def create_model_card(args, model_name):
    """Render and save a README.md model card from the training arguments."""
    if not is_jinja_available():
        raise ValueError(
            'Modelcard rendering is based on Jinja templates.'
            ' Please make sure to have `jinja` installed before using `create_model_card`.'
            ' To install it, please run `pip install Jinja2`.' )
    if hasattr(args , 'local_rank' ) and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args , 'hub_token' ) else None
    repo_name = get_full_repo_name(model_name , token=hub_token )
    model_card = ModelCard.from_template(
        card_data=ModelCardData( # Card metadata object that will be converted to YAML block
            language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=MODEL_CARD_TEMPLATE_PATH , model_name=model_name , repo_name=repo_name , dataset_name=args.dataset_name if hasattr(args , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args , 'gradient_accumulation_steps' ) else None
        ) , adam_beta1=args.adam_beta1 if hasattr(args , 'adam_beta1' ) else None , adam_beta2=args.adam_beta2 if hasattr(args , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(args , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(args , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(args , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(args , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(args , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(args , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(args , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , )
    card_path = os.path.join(args.output_dir , 'README.md' )
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extract the commit hash from a resolved filename pointing into the cache."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r'snapshots/([^/]+)/' , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None):
    """Move blobs from the old cache layout to the new one, leaving symlinks behind."""
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
            except OSError:
                logger.warning(
                    'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
"existing cached models. This is a one-time operation, you can interrupt it or run it "
"later by calling `diffusers.utils.hub_utils.move_cache()`."
)
try:
move_cache()
except Exception as e:
_lowerCAmelCase : Optional[int] = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
"file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
"message and we will do our best to help."
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, "w") as f:
f.write("1")
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
"the directory exists and can be written to."
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    """Insert `variant` before the file extension, e.g. model.bin -> model.fp16.bin."""
    if variant is not None:
        splits = weights_name.split('.')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits)
    return weights_name
def _get_model_file(
    pretrained_model_name_or_path, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None, ):
    """Resolve a model weights file from a local path, a local directory, or the Hub."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
            return model_file
        else:
            raise EnvironmentError(
                f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version ) >= version.parse('0.20.0' )
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , FutureWarning , )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}\' so that the correct variant file can be added.' , FutureWarning , )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
                'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
                'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
                'login`.' )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
                'this model name. Check the model page at '
                f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
        except EntryNotFoundError:
            raise EnvironmentError(
                f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
        except HTTPError as err:
            raise EnvironmentError(
                f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
        except ValueError:
            raise EnvironmentError(
                f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
                f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
                f' directory containing a file named {weights_name} or'
                ' \nCheckout your internet connection or see how to run the library in'
                ' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
        except EnvironmentError:
            raise EnvironmentError(
                f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
                '\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
                f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
                f'containing a file named {weights_name}' )
| 353 |
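The variant rule above is easy to sanity-check in isolation; a minimal re-implementation (function name chosen here, same split-on-dot logic) behaves like this:

from typing import Optional

def add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    # Insert the variant between the base name and the extension.
    if variant is not None:
        parts = weights_name.split(".")
        weights_name = ".".join(parts[:-1] + [variant] + parts[-1:])
    return weights_name

assert add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert add_variant("model.safetensors") == "model.safetensors"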
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 256_047
RO_CODE = 256_145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =NllbTokenizer(__snake_case , keep_accents=__snake_case )
__a =tokenizer.tokenize('This is a test' )
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__a =tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a =tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        self.tokenizers_list = [(self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})]
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
__a =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
@require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                # Longer text that will definitely require truncation.
                src_text = [
                    ' UN Chief Says There Is No Military Solution in Syria',
                    ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
                    ' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
                    ' will only worsen the violence and misery for millions of people.',
                ]
                tgt_text = [
                    'Şeful ONU declară că nu există o soluţie militară în Siria',
                    'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
                    ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
                    ' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text , tgt_texts=tgt_text , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1] , 3 )
                self.assertEqual(batch.labels.shape[1] , 10 )
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text , tgt_texts=tgt_text , max_length=3 , return_tensors='pt' )
                self.assertEqual(batch.input_ids.shape[1] , 3 )
                self.assertEqual(batch.labels.shape[1] , 3 )
                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text , max_length=3 , max_target_length=10 , return_tensors='pt' )
                self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
                self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
                self.assertNotIn('decoder_input_ids' , batch_encoder_only )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =[AddedToken('<special>' , lstrip=__snake_case )]
__a =self.rust_tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case )
__a =tokenizer_r.encode('Hey this is a <special> token' )
__a =tokenizer_r.encode('<special>' , add_special_tokens=__snake_case )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__a =self.rust_tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
__a =self.tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case )
__a =tokenizer_p.encode('Hey this is a <special> token' )
__a =tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase ):
    checkpoint_name = 'facebook/nllb-200-distilled-600M'
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
    tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    expected_src_tokens = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' )
        cls.pad_token_id = 1
        return cls
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 25_6057 )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
# fmt: off
__a =[RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
__a =self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
__a =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertNotIn(self.tokenizer.eos_token , __snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , __snake_case )
__a =10
__a =self.tokenizer(__snake_case , max_length=__snake_case , truncation=__snake_case ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __snake_case )
self.assertEqual(len(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_6203, 3] )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =tempfile.mkdtemp()
__a =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__snake_case )
__a =NllbTokenizer.from_pretrained(__snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __snake_case )
@require_torch
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
__a =shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
__a =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
self.assertEqual(__snake_case , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =self.tokenizer(self.src_text , padding=__snake_case , truncation=__snake_case , max_length=3 , return_tensors='pt' )
__a =self.tokenizer(
text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=10 , return_tensors='pt' )
__a =targets['input_ids']
__a =shift_tokens_right(
__snake_case , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(__snake_case ) , {
# A, test, EOS, en_XX
'input_ids': [[25_6047, 70, 7356, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_6057,
} , )
@require_torch
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =True
__a =self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
__a =False
__a =self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
| 308 | 0 |
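For context, translating with the checkpoint exercised above takes only a few lines; the language codes come straight from the tests, the rest is standard generation (the model download is on the order of a couple of gigabytes, figure approximate).

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
inputs = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["ron_Latn"], max_length=48)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])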
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_lowerCamelCase : str = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 28 |
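The file above is one instance of a common deprecation-shim pattern; a generic, self-contained version (all names invented for illustration) looks like this:

import warnings

class NewImageProcessor:
    def __init__(self, do_resize: bool = True):
        self.do_resize = do_resize

class OldFeatureExtractor(NewImageProcessor):
    # The old name stays importable but warns on instantiation and otherwise
    # inherits all behavior from its replacement.
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated and will be removed; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)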
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 | 1 |
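The `_LazyModule` indirection above can be approximated with PEP 562's module-level `__getattr__`; a toy version (submodule name hypothetical) that defers the real import until first attribute access:

import importlib

_import_structure = {"tokenization_m2m_100": ["M2M100Tokenizer"]}

def __getattr__(name):
    # Resolve the attribute to its submodule lazily; importlib caches via sys.modules.
    for submodule, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(f".{submodule}", __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")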
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase ):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
'''simple docstring'''
_lowerCamelCase : Dict = 1
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Union[str, Any] = (32, 32)
_lowerCamelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(__lowerCAmelCase )
return image
@property
    def dummy_cond_unet_upscale(self):
'''simple docstring'''
torch.manual_seed(0 )
        _lowerCamelCase : Optional[int] = UNet2DConditionModel(
            block_out_channels=(32, 32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=7 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,attention_head_dim=8 ,use_linear_projection=True ,only_cross_attention=(True, True, False) ,num_class_embeds=100 ,)
return model
@property
    def dummy_vae(self):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = AutoencoderKL(
block_out_channels=[32, 32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
return model
@property
    def dummy_text_encoder(self):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,hidden_act="gelu" ,projection_dim=512 ,)
return CLIPTextModel(__lowerCAmelCase )
    def test_stable_diffusion_upscale(self):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : List[str] = self.dummy_cond_unet_upscale
_lowerCamelCase : Dict = DDPMScheduler()
_lowerCamelCase : Optional[int] = DDIMScheduler(prediction_type="v_prediction" )
_lowerCamelCase : List[Any] = self.dummy_vae
_lowerCamelCase : List[Any] = self.dummy_text_encoder
_lowerCamelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_lowerCamelCase : Tuple = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
        _lowerCamelCase : Dict = Image.fromarray(np.uint8(__lowerCAmelCase ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCamelCase : Any = StableDiffusionUpscalePipeline(
unet=__lowerCAmelCase ,low_res_scheduler=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,vae=__lowerCAmelCase ,text_encoder=__lowerCAmelCase ,tokenizer=__lowerCAmelCase ,max_noise_level=350 ,)
_lowerCamelCase : int = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : str = "A painting of a squirrel eating a burger"
_lowerCamelCase : Union[str, Any] = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
_lowerCamelCase : Union[str, Any] = sd_pipe(
[prompt] ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="np" ,)
_lowerCamelCase : Any = output.images
_lowerCamelCase : List[Any] = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
_lowerCamelCase : str = sd_pipe(
[prompt] ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="np" ,return_dict=__lowerCAmelCase ,)[0]
_lowerCamelCase : str = image[0, -3:, -3:, -1]
_lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
_lowerCamelCase : Dict = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_lowerCamelCase : Union[str, Any] = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
'''simple docstring'''
_lowerCamelCase : int = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : Union[str, Any] = self.dummy_cond_unet_upscale
_lowerCamelCase : Any = DDPMScheduler()
_lowerCamelCase : List[str] = DDIMScheduler(prediction_type="v_prediction" )
_lowerCamelCase : List[Any] = self.dummy_vae
_lowerCamelCase : int = self.dummy_text_encoder
_lowerCamelCase : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_lowerCamelCase : List[Any] = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
        _lowerCamelCase : List[Any] = Image.fromarray(np.uint8(__lowerCAmelCase ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCamelCase : str = StableDiffusionUpscalePipeline(
unet=__lowerCAmelCase ,low_res_scheduler=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,vae=__lowerCAmelCase ,text_encoder=__lowerCAmelCase ,tokenizer=__lowerCAmelCase ,max_noise_level=350 ,)
_lowerCamelCase : List[str] = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Tuple = "A painting of a squirrel eating a burger"
_lowerCamelCase : str = sd_pipe(
2 * [prompt] ,image=2 * [low_res_image] ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="np" ,)
_lowerCamelCase : Dict = output.images
assert image.shape[0] == 2
_lowerCamelCase : List[Any] = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
_lowerCamelCase : Any = sd_pipe(
[prompt] ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,num_images_per_prompt=2 ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="np" ,)
_lowerCamelCase : List[Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" ,"This test requires a GPU" )
    def test_stable_diffusion_upscale_fp16(self):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.dummy_cond_unet_upscale
_lowerCamelCase : Tuple = DDPMScheduler()
_lowerCamelCase : Optional[Any] = DDIMScheduler(prediction_type="v_prediction" )
_lowerCamelCase : int = self.dummy_vae
_lowerCamelCase : Any = self.dummy_text_encoder
_lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_lowerCamelCase : Union[str, Any] = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
        _lowerCamelCase : List[Any] = Image.fromarray(np.uint8(__lowerCAmelCase ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_lowerCamelCase : Dict = unet.half()
_lowerCamelCase : Tuple = text_encoder.half()
# make sure here that pndm scheduler skips prk
_lowerCamelCase : Optional[Any] = StableDiffusionUpscalePipeline(
unet=__lowerCAmelCase ,low_res_scheduler=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,vae=__lowerCAmelCase ,text_encoder=__lowerCAmelCase ,tokenizer=__lowerCAmelCase ,max_noise_level=350 ,)
_lowerCamelCase : str = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "A painting of a squirrel eating a burger"
_lowerCamelCase : List[str] = torch.manual_seed(0 )
_lowerCamelCase : List[Any] = sd_pipe(
[prompt] ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,num_inference_steps=2 ,output_type="np" ,).images
_lowerCamelCase : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
'''simple docstring'''
_lowerCamelCase : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_lowerCamelCase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
_lowerCamelCase : List[str] = "stabilityai/stable-diffusion-x4-upscaler"
_lowerCamelCase : Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_lowerCamelCase : Dict = "a cat sitting on a park bench"
_lowerCamelCase : Any = torch.manual_seed(0 )
_lowerCamelCase : Dict = pipe(
prompt=__lowerCAmelCase ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,output_type="np" ,)
_lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
'''simple docstring'''
_lowerCamelCase : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_lowerCamelCase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
_lowerCamelCase : str = "stabilityai/stable-diffusion-x4-upscaler"
_lowerCamelCase : int = StableDiffusionUpscalePipeline.from_pretrained(
            __lowerCAmelCase ,torch_dtype=torch.float16 ,)
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_lowerCamelCase : List[Any] = "a cat sitting on a park bench"
_lowerCamelCase : Any = torch.manual_seed(0 )
_lowerCamelCase : Optional[Any] = pipe(
prompt=__lowerCAmelCase ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,output_type="np" ,)
_lowerCamelCase : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowercase ( self: Any ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCamelCase : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_lowerCamelCase : List[str] = "stabilityai/stable-diffusion-x4-upscaler"
_lowerCamelCase : Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(
__lowerCAmelCase ,torch_dtype=torch.float16 ,)
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCamelCase : List[str] = "a cat sitting on a park bench"
_lowerCamelCase : List[str] = torch.manual_seed(0 )
_lowerCamelCase : str = pipe(
prompt=__lowerCAmelCase ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,num_inference_steps=5 ,output_type="np" ,)
_lowerCamelCase : int = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 364 |
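# A minimal sketch (not part of the tests above) of the peak-memory measurement
# pattern they use: reset the CUDA counters, enable attention slicing and sequential
# CPU offload, run the pipeline, then read back torch.cuda.max_memory_allocated().
# Assumes a CUDA device and the diffusers stack; the prompt is illustrative.
import torch
from diffusers import StableDiffusionUpscalePipeline

def measure_peak_upscale_memory(low_res_image, prompt="a cat sitting on a park bench"):
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    )
    pipe.enable_attention_slicing(1)      # compute attention one slice at a time
    pipe.enable_sequential_cpu_offload()  # keep only the active submodule on the GPU
    pipe(prompt=prompt, image=low_res_image, num_inference_steps=5, output_type="np")
    return torch.cuda.max_memory_allocated()  # peak bytes allocated during the call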
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Optional[Any] = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
_lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340 | 0 |
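# A minimal sketch of the lazy-import idea behind _LazyModule above: a module-level
# __getattr__ (PEP 562) defers the heavy import until an attribute is first touched.
# The structure dict mirrors the init's import map; the relative import only resolves
# when this file actually lives inside the package it names.
import importlib

_structure = {"configuration_mctct": ["MCTCTConfig"], "processing_mctct": ["MCTCTProcessor"]}
_attr_to_module = {attr: mod for mod, attrs in _structure.items() for attr in attrs}

def __getattr__(name):  # only called when `name` is not already defined in this module
    module = _attr_to_module.get(name)
    if module is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    return getattr(importlib.import_module(f".{module}", __name__), name)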
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase = features.copy()
__UpperCAmelCase = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = JsonDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.split == (split if split else "train")
@pytest.mark.parametrize('''path_type''' , [str, list] )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
if issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = jsonl_path
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = [jsonl_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=("train",) ) -> str:
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for split in splits:
__UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if split:
__UpperCAmelCase = {split: jsonl_path}
else:
__UpperCAmelCase = '''train'''
__UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __a ( SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
return json.load(SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
return [json.loads(line ) for line in SCREAMING_SNAKE_CASE ]
class A_ :
'''simple docstring'''
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase__ , lowercase__ , lines=lowercase__ ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(lowercase__ )
assert isinstance(lowercase__ , lowercase__ )
assert isinstance(exported_content[0] , lowercase__ )
assert len(lowercase__ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[str]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase__ , lowercase__ , lines=lowercase__ , orient=lowercase__ ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(lowercase__ )
assert isinstance(lowercase__ , lowercase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowercase__ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowercase__ ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase__ , lowercase__ , lines=lowercase__ , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(lowercase__ )
assert isinstance(lowercase__ , lowercase__ )
assert isinstance(exported_content[0] , lowercase__ )
assert len(lowercase__ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase__ , lowercase__ , lines=lowercase__ , orient=lowercase__ , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(lowercase__ )
assert isinstance(lowercase__ , lowercase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowercase__ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowercase__ ) == 10
def lowerCAmelCase_ (self , lowercase__ ) -> Optional[int]:
with pytest.raises(lowercase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase__ , lowercase__ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[str]:
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
__UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(lowercase__ , lowercase__ , compression=lowercase__ ).write()
with fsspec.open(lowercase__ , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
with fsspec.open(lowercase__ , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
assert exported_content == original_content
| 333 |
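# A minimal sketch of the round trip the JSON tests above exercise, assuming a local
# `datasets` install: a Dataset is written to an in-memory buffer as JSON Lines and
# parsed back the same way load_json_lines does.
import io
import json
from datasets import Dataset
from datasets.io.json import JsonDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
with io.BytesIO() as buffer:
    JsonDatasetWriter(ds, buffer, lines=True).write()  # one JSON object per line
    buffer.seek(0)
    rows = [json.loads(line) for line in buffer]
assert rows == [{"col_1": "a", "col_2": 1, "col_3": 1.0},
                {"col_1": "b", "col_2": 2, "col_3": 2.0}]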
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( _a ):
'''simple docstring'''
a__ = (IPNDMScheduler,)
a__ = (("num_inference_steps", 50),)
def lowerCAmelCase_ (self , **lowercase__ ) -> Tuple:
__UpperCAmelCase = {'''num_train_timesteps''': 1_000}
config.update(**lowercase__ )
return config
def lowerCAmelCase_ (self , lowercase__=0 , **lowercase__ ) -> Any:
__UpperCAmelCase = dict(self.forward_default_kwargs )
__UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ )
__UpperCAmelCase = self.dummy_sample
__UpperCAmelCase = 0.1 * sample
__UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase = self.get_scheduler_config(**lowercase__ )
__UpperCAmelCase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals
__UpperCAmelCase = dummy_past_residuals[:]
if time_step is None:
__UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__ )
__UpperCAmelCase = scheduler_class.from_pretrained(lowercase__ )
new_scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals
__UpperCAmelCase = dummy_past_residuals[:]
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ (self ) -> List[str]:
pass
def lowerCAmelCase_ (self , lowercase__=0 , **lowercase__ ) -> Optional[int]:
__UpperCAmelCase = dict(self.forward_default_kwargs )
__UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ )
__UpperCAmelCase = self.dummy_sample
__UpperCAmelCase = 0.1 * sample
__UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase = self.get_scheduler_config()
__UpperCAmelCase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals (must be after setting timesteps)
__UpperCAmelCase = dummy_past_residuals[:]
if time_step is None:
__UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__ )
__UpperCAmelCase = scheduler_class.from_pretrained(lowercase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase__ )
# copy over dummy past residual (must be after setting timesteps)
__UpperCAmelCase = dummy_past_residuals[:]
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ (self , **lowercase__ ) -> List[Any]:
__UpperCAmelCase = self.scheduler_classes[0]
__UpperCAmelCase = self.get_scheduler_config(**lowercase__ )
__UpperCAmelCase = scheduler_class(**lowercase__ )
__UpperCAmelCase = 10
__UpperCAmelCase = self.dummy_model()
__UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowercase__ )
for i, t in enumerate(scheduler.timesteps ):
__UpperCAmelCase = model(lowercase__ , lowercase__ )
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
__UpperCAmelCase = model(lowercase__ , lowercase__ )
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample
return sample
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = dict(self.forward_default_kwargs )
__UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ )
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase = self.get_scheduler_config()
__UpperCAmelCase = scheduler_class(**lowercase__ )
__UpperCAmelCase = self.dummy_sample
__UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase__ , '''set_timesteps''' ):
scheduler.set_timesteps(lowercase__ )
elif num_inference_steps is not None and not hasattr(lowercase__ , '''set_timesteps''' ):
__UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__UpperCAmelCase = dummy_past_residuals[:]
__UpperCAmelCase = scheduler.timesteps[5]
__UpperCAmelCase = scheduler.timesteps[6]
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCAmelCase_ (self ) -> List[Any]:
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=timesteps , time_step=None )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = self.full_loop()
__UpperCAmelCase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
| 333 | 1 |
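# A minimal sketch of the save/restore check the scheduler tests above repeat:
# persist the config, reload with from_pretrained, and confirm both instances
# produce the same timestep schedule. Assumes the diffusers library.
import tempfile
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)
with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)                     # writes scheduler_config.json
    restored = IPNDMScheduler.from_pretrained(tmpdirname)
restored.set_timesteps(50)
assert torch.equal(restored.timesteps, scheduler.timesteps)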
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Any = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(__lowerCamelCase, __lowerCamelCase )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = emb.weight.shape
UpperCAmelCase_ : List[str] = nn.Linear(__lowerCamelCase, __lowerCamelCase, bias=__lowerCamelCase )
UpperCAmelCase_ : Any = emb.weight.data
return lin_layer
def __a ( __lowerCamelCase, __lowerCamelCase=None ):
UpperCAmelCase_ : str = {}
for old_key in state_dict.keys():
UpperCAmelCase_ : Optional[int] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
UpperCAmelCase_ : str = key.replace("moe_layer.experts.0", f"""ffn.experts.expert_{expert_idx}""" )
else:
UpperCAmelCase_ : Union[str, Any] = key.replace("moe_layer.experts.", "ffn.experts.expert_" )
if "gate" in key:
UpperCAmelCase_ : int = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier" )
if "fc2" and "experts" not in key:
UpperCAmelCase_ : Tuple = key.replace(".fc2.", ".ffn.fc2." )
if "fc1" and "experts" not in key:
UpperCAmelCase_ : str = key.replace(".fc1.", ".ffn.fc1." )
if ".encoder_attn." in key:
UpperCAmelCase_ : Any = key.replace(".encoder_attn.", ".cross_attention." )
if "encoder_attn_layer_norm" in key:
UpperCAmelCase_ : str = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm" )
if "final_layer_norm" in key:
UpperCAmelCase_ : Optional[Any] = key.replace("final_layer_norm", "ff_layer_norm" )
UpperCAmelCase_ : str = state_dict[old_key]
return new_dict
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = WEIGHTS_NAME ):
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : int = 0
os.makedirs(__lowerCamelCase, exist_ok=__lowerCamelCase )
for expert in range(__lowerCamelCase ):
UpperCAmelCase_ : str = switch_checkpoint_path + f"""-rank-{expert}.pt"""
if os.path.isfile(__lowerCamelCase ):
UpperCAmelCase_ : Tuple = torch.load(__lowerCamelCase )["model"]
remove_ignore_keys_(__lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = rename_fairseq_keys(__lowerCamelCase, __lowerCamelCase )
UpperCAmelCase_ : Optional[int] = os.path.join(
__lowerCamelCase, weights_name.replace(".bin", f"""-{len(__lowerCamelCase )+1:05d}-of-???.bin""" ) )
torch.save(__lowerCamelCase, __lowerCamelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__lowerCamelCase )[0]].dtype )
# Add the last block
UpperCAmelCase_ : Optional[Any] = os.path.join(__lowerCamelCase, weights_name.replace(".bin", f"""-{len(__lowerCamelCase )+1:05d}-of-???.bin""" ) )
UpperCAmelCase_ : Union[str, Any] = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
remove_ignore_keys_(__lowerCamelCase )
UpperCAmelCase_ : Union[str, Any] = rename_fairseq_keys(__lowerCamelCase, __lowerCamelCase )
UpperCAmelCase_ : Union[str, Any] = shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__lowerCamelCase ) == 1:
UpperCAmelCase_ : int = os.path.join(__lowerCamelCase, __lowerCamelCase )
torch.save(__lowerCamelCase, __lowerCamelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__lowerCamelCase, __lowerCamelCase )
# Otherwise, let's build the index
UpperCAmelCase_ : Tuple = {}
for idx, shard in enumerate(__lowerCamelCase ):
UpperCAmelCase_ : List[str] = weights_name.replace(".bin", f"""-{idx+1:05d}-of-{len(__lowerCamelCase ):05d}.bin""" )
UpperCAmelCase_ : Optional[Any] = os.path.join(__lowerCamelCase, weights_name.replace(".bin", f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(__lowerCamelCase, os.path.join(__lowerCamelCase, __lowerCamelCase ) )
for key in shard:
UpperCAmelCase_ : str = shard_file
# Add the metadata
UpperCAmelCase_ : int = {"total_size": total_size}
UpperCAmelCase_ : Any = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(__lowerCamelCase, __lowerCamelCase ), "w", encoding="utf-8" ) as f:
UpperCAmelCase_ : int = json.dumps(__lowerCamelCase, indent=2, sort_keys=__lowerCamelCase ) + "\n"
f.write(__lowerCamelCase )
return metadata, index
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
_a = parser.parse_args()
_a , _a = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
_a = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
_a = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path)
| 23 |
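# A minimal sketch of the index the conversion script above assembles: parameter
# bytes are summed with dtype_byte_size and weight_map ties each tensor name to its
# shard file. The shard contents and file names here are illustrative dummies.
import json
import torch
from transformers.modeling_utils import dtype_byte_size

shards = {
    "model-00001-of-00002.bin": {"w1": torch.zeros(4, 4)},
    "model-00002-of-00002.bin": {"w2": torch.zeros(2, dtype=torch.float16)},
}
total_size, weight_map = 0, {}
for shard_file, state_dict in shards.items():
    for name, tensor in state_dict.items():
        total_size += tensor.numel() * dtype_byte_size(tensor.dtype)  # bytes per tensor
        weight_map[name] = shard_file
index = {"metadata": {"total_size": total_size}, "weight_map": weight_map}
print(json.dumps(index, indent=2, sort_keys=True))  # same layout as the weights index file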
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_a = logging.getLogger()
def __a ( ):
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument("-f" )
UpperCAmelCase_ : Dict = parser.parse_args()
return args.f
class A_ (lowercase__ ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = logging.StreamHandler(sys.stdout )
logger.addHandler(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(lowercase_ , "argv" , lowercase_ ):
UpperCAmelCase_ : List[str] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(value , 0.6_66 )
@slow
@require_torch_non_multi_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowercase_ )
UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowercase_ )
UpperCAmelCase_ : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowercase_ )
| 23 | 1 |
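# A minimal sketch of the argv-patching trick run_and_check relies on above:
# sys.argv is swapped for a synthetic command line so a script's main() can be
# driven from a unit test. The parser and flag here are illustrative.
import argparse
import sys
from unittest.mock import patch

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--learning_rate", type=float)
    return parser.parse_args().learning_rate

testargs = "run_glue_deebert.py --learning_rate 2e-4".split()
with patch.object(sys, "argv", testargs):
    assert main() == 2e-4  # main() parsed the patched command line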
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A__: Tuple = logging.get_logger(__name__)
class _a ( UpperCamelCase__):
"""simple docstring"""
UpperCamelCase__ = ["""pixel_values"""]
def __init__( self: str , __lowerCamelCase: bool = True , __lowerCamelCase: Dict[str, int] = None , __lowerCamelCase: PILImageResampling = PILImageResampling.BICUBIC , __lowerCamelCase: bool = True , __lowerCamelCase: Dict[str, int] = None , __lowerCamelCase: bool = True , __lowerCamelCase: Union[int, float] = 1 / 255 , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , __lowerCamelCase: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **__lowerCamelCase: Optional[int] , ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
UpperCamelCase__: List[str] = size if size is not None else {"shortest_edge": 224}
UpperCamelCase__: Dict = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase )
UpperCamelCase__: int = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCamelCase__: Dict = get_size_dict(__lowerCamelCase , param_name="crop_size" )
UpperCamelCase__: Tuple = do_resize
UpperCamelCase__: str = size
UpperCamelCase__: List[str] = resample
UpperCamelCase__: List[str] = do_center_crop
UpperCamelCase__: Optional[Any] = crop_size
UpperCamelCase__: Tuple = do_rescale
UpperCamelCase__: Dict = rescale_factor
UpperCamelCase__: Tuple = do_normalize
UpperCamelCase__: Tuple = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCamelCase__: Optional[int] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase_ ( self: Tuple , __lowerCamelCase: np.ndarray , __lowerCamelCase: Dict[str, int] , __lowerCamelCase: PILImageResampling = PILImageResampling.BICUBIC , __lowerCamelCase: Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase: Union[str, Any] , ):
'''simple docstring'''
UpperCamelCase__: List[Any] = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
UpperCamelCase__: List[Any] = int((256 / 224) * size["shortest_edge"] )
UpperCamelCase__: Dict = get_resize_output_image_size(__lowerCamelCase , size=__lowerCamelCase , default_to_square=__lowerCamelCase )
UpperCamelCase__: Any = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
__lowerCamelCase , size=(size_dict["height"], size_dict["width"]) , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def UpperCAmelCase_ ( self: Any , __lowerCamelCase: np.ndarray , __lowerCamelCase: Dict[str, int] , __lowerCamelCase: Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase: str , ):
'''simple docstring'''
UpperCamelCase__: List[str] = get_size_dict(__lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(__lowerCamelCase , size=(size["height"], size["width"]) , data_format=__lowerCamelCase , **__lowerCamelCase )
def UpperCAmelCase_ ( self: Any , __lowerCamelCase: np.ndarray , __lowerCamelCase: Union[int, float] , __lowerCamelCase: Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase: Any , ):
'''simple docstring'''
return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def UpperCAmelCase_ ( self: List[str] , __lowerCamelCase: np.ndarray , __lowerCamelCase: Union[float, List[float]] , __lowerCamelCase: Union[float, List[float]] , __lowerCamelCase: Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase: List[Any] , ):
'''simple docstring'''
return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def UpperCAmelCase_ ( self: Tuple , __lowerCamelCase: ImageInput , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: Optional[Dict[str, int]] = None , __lowerCamelCase: PILImageResampling = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: Optional[Dict[str, int]] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: Optional[float] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: Optional[Union[float, Iterable[float]]] = None , __lowerCamelCase: Optional[Union[float, Iterable[float]]] = None , __lowerCamelCase: Optional[TensorType] = None , __lowerCamelCase: ChannelDimension = ChannelDimension.FIRST , **__lowerCamelCase: List[Any] , ):
'''simple docstring'''
UpperCamelCase__: Union[str, Any] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__: Any = resample if resample is not None else self.resample
UpperCamelCase__: Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__: Dict = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__: str = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__: Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__: Optional[int] = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__: Optional[int] = image_std if image_std is not None else self.image_std
UpperCamelCase__: Any = size if size is not None else self.size
UpperCamelCase__: Union[str, Any] = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase )
UpperCamelCase__: List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__: List[str] = get_size_dict(__lowerCamelCase , param_name="crop_size" )
UpperCamelCase__: Dict = make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCamelCase__: List[Any] = [to_numpy_array(__lowerCamelCase ) for image in images]
if do_resize:
UpperCamelCase__: Dict = [self.resize(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) for image in images]
if do_center_crop:
UpperCamelCase__: List[str] = [self.center_crop(__lowerCamelCase , __lowerCamelCase ) for image in images]
if do_rescale:
UpperCamelCase__: Any = [self.rescale(__lowerCamelCase , __lowerCamelCase ) for image in images]
if do_normalize:
UpperCamelCase__: Any = [self.normalize(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) for image in images]
UpperCamelCase__: Optional[Any] = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images]
UpperCamelCase__: Optional[int] = {"pixel_values": images}
return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
| 149 |
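# A minimal pure-Python sketch of the shortest-edge resize rule the processor above
# implements: the short side is scaled to int((256 / 224) * shortest_edge) and the
# long side follows the aspect ratio (the default_to_square=False case).
def shortest_edge_output_size(height, width, shortest_edge=224):
    target = int((256 / 224) * shortest_edge)  # 224 -> 256, as in the processor
    short, long = (height, width) if height <= width else (width, height)
    new_long = int(target * long / short)      # preserve the aspect ratio
    return (target, new_long) if height <= width else (new_long, target)

assert shortest_edge_output_size(480, 640) == (256, 341)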
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A__: str = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__: Union[str, Any] = ['''YolosFeatureExtractor''']
A__: Optional[int] = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__: str = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
A__: Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 149 | 1 |
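# A minimal sketch of the optional-dependency guard used in the init above:
# availability is probed without importing the package, and a sentinel exception
# routes between the real symbols and an empty fallback. Symbol names are illustrative.
import importlib.util

class OptionalDependencyNotAvailable(Exception):
    pass

def is_torch_available():
    return importlib.util.find_spec("torch") is not None

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    TORCH_BACKED_SYMBOLS = []  # torch missing: expose nothing
else:
    TORCH_BACKED_SYMBOLS = ["YolosModel", "YolosForObjectDetection"]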
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def __UpperCamelCase ( ):
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''-m''' , '''--pretrained_model_name_or_path''' , type=_A , default=_A , required=_A , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
parser.add_argument(
'''-c''' , '''--caption''' , type=_A , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
parser.add_argument(
'''-n''' , '''--images_num''' , type=_A , default=4 , help='''How much images to generate.''' , )
parser.add_argument(
'''-s''' , '''--seed''' , type=_A , default=42 , help='''Seed for random process.''' , )
parser.add_argument(
'''-ci''' , '''--cuda_id''' , type=_A , default=0 , help='''cuda_id.''' , )
lowerCAmelCase_ = parser.parse_args()
return args
def __UpperCamelCase ( _A , _A , _A ):
if not len(_A ) == rows * cols:
raise ValueError('''The specified number of rows and columns are not correct.''' )
lowerCAmelCase_ , lowerCAmelCase_ = imgs[0].size
lowerCAmelCase_ = Image.new('''RGB''' , size=(cols * w, rows * h) )
lowerCAmelCase_ , lowerCAmelCase_ = grid.size
for i, img in enumerate(_A ):
grid.paste(_A , box=(i % cols * w, i // cols * h) )
return grid
def __UpperCamelCase ( _A , _A="robotic cat with wings" , _A=7.5 , _A=50 , _A=1 , _A=42 , ):
lowerCAmelCase_ = torch.Generator(pipeline.device ).manual_seed(_A )
lowerCAmelCase_ = pipeline(
_A , guidance_scale=_A , num_inference_steps=_A , generator=_A , num_images_per_prompt=_A , ).images
lowerCAmelCase_ = int(math.sqrt(_A ) )
lowerCAmelCase_ = image_grid(_A , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
_A = parse_args()
# Load models and create wrapper for stable diffusion
_A = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
_A = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
_A = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
_A = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
_A = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
_A = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
_A = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
_A = unet.to(torch.device('''cuda''', args.cuda_id))
_A = pipeline.to(unet.device)
_A , _A = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
_A = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 353 |
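# A minimal usage sketch of the image_grid helper defined above: four solid-color
# tiles are pasted left-to-right, top-to-bottom via box=(i % cols * w, i // cols * h).
# Requires Pillow only.
from PIL import Image

tiles = [Image.new("RGB", (64, 64), color) for color in ("red", "green", "blue", "white")]
rows, cols = 2, 2
w, h = tiles[0].size
grid = Image.new("RGB", (cols * w, rows * h))
for i, img in enumerate(tiles):
    grid.paste(img, box=(i % cols * w, i // cols * h))  # column offset, then row offset
assert grid.size == (128, 128)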
import string
def __UpperCamelCase ( _A ):
for key in range(len(string.ascii_uppercase ) ):
lowerCAmelCase_ = ''''''
for symbol in message:
if symbol in string.ascii_uppercase:
lowerCAmelCase_ = string.ascii_uppercase.find(_A )
lowerCAmelCase_ = num - key
if num < 0:
lowerCAmelCase_ = num + len(string.ascii_uppercase )
lowerCAmelCase_ = translated + string.ascii_uppercase[num]
else:
lowerCAmelCase_ = translated + symbol
print(f"Decryption using Key #{key}: {translated}" )
def __UpperCamelCase ( ):
lowerCAmelCase_ = input('''Encrypted message: ''' )
lowerCAmelCase_ = message.upper()
decrypt(_A )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 167 | 0 |
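# A minimal sketch of the same shift written with modular arithmetic: the
# `if num < 0` wrap-around in the decrypt loop above collapses into a single `% 26`.
import string

def caesar_decrypt(message, key):
    out = []
    for symbol in message.upper():
        if symbol in string.ascii_uppercase:
            num = (string.ascii_uppercase.find(symbol) - key) % 26  # wrap within A..Z
            out.append(string.ascii_uppercase[num])
        else:
            out.append(symbol)
    return "".join(out)

assert caesar_decrypt("KHOOR", 3) == "HELLO"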
"""simple docstring"""
import heapq
def lowerCamelCase__ ( __snake_case ) -> set[int]:
"""simple docstring"""
_UpperCamelCase = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(__snake_case, [-1 * len(__snake_case ), (key, value)] )
# chosen_vertices = set of chosen vertices
_UpperCamelCase = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_UpperCamelCase = heapq.heappop(__snake_case )[1][0]
chosen_vertices.add(__snake_case )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
_UpperCamelCase = elem[1][1].index(__snake_case )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(__snake_case )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_a = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 194 |
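# A minimal sketch of the max-heap trick the function above relies on: heapq is a
# min-heap, so pushing the negated degree makes the highest-degree vertex pop first.
import heapq

graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
queue = [[-len(adj), node] for node, adj in graph.items()]
heapq.heapify(queue)                     # min-heap over negated degrees
neg_degree, node = heapq.heappop(queue)  # most-connected vertex comes out first
assert (node, -neg_degree) == (2, 3)     # vertex 2 has the maximum degree, 3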
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def snake_case( ) -> List[str]:
'''simple docstring'''
lowercase : Any = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=__magic_name__ )
lowercase : Optional[Any] = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=__magic_name__ )
env_command_parser(subparsers=__magic_name__ )
launch_command_parser(subparsers=__magic_name__ )
tpu_command_parser(subparsers=__magic_name__ )
test_command_parser(subparsers=__magic_name__ )
# Let's go
lowercase : Dict = parser.parse_args()
if not hasattr(__magic_name__ , '''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(__magic_name__ )
if __name__ == "__main__":
main()
| 308 | 0 |
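# A minimal sketch of the subcommand dispatch implemented above: each subparser
# attaches its handler via set_defaults(func=...), and main() checks
# hasattr(args, "func") before calling it. The "env" command is illustrative.
from argparse import ArgumentParser

def env_handler(args):
    print("env info")

parser = ArgumentParser("tool", usage="tool <command> [<args>]")
subparsers = parser.add_subparsers(help="command helpers")
env_parser = subparsers.add_parser("env")
env_parser.set_defaults(func=env_handler)  # this is how args.func gets attached

args = parser.parse_args(["env"])
if hasattr(args, "func"):
    args.func(args)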
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
lowerCamelCase__ = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
lowerCamelCase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
lowerCamelCase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> dict[str, int]:
lowerCAmelCase__ : Optional[Any] = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
return SCREAMING_SNAKE_CASE_[0]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ : int = get_letter_count(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(letter )
lowerCAmelCase__ : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
lowerCAmelCase__ : List[str] = ''.join(freq_to_letter[freq] )
lowerCAmelCase__ : List[str] = list(freq_to_letter_str.items() )
freq_pairs.sort(key=lambda pair: pair[0] , reverse=True )
lowerCAmelCase__ : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
lowerCAmelCase__ : Optional[int] = get_frequency_order(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 307 |
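# A minimal worked example of the scoring rule above: one point for each of ETAOIN's
# six most and six least frequent letters that also sit at the matching end of the
# text's frequency order, so a perfectly English-like ordering scores 12.
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
freq_order = "ETAOINSHRDLCUMWFGYPBVKJXQZ"  # pretend the text ranked letters this way
score = sum(letter in freq_order[:6] for letter in ETAOIN[:6]) + sum(
    letter in freq_order[-6:] for letter in ETAOIN[-6:]
)
assert score == 12  # the maximum possible match score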
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = UnCLIPImageVariationPipeline
lowercase = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
lowercase = IMAGE_VARIATION_BATCH_PARAMS
lowercase = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
lowercase = False
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return 100
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(a )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
lowerCAmelCase__ : Optional[Any] = UnCLIPTextProjModel(**a )
return model
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
lowerCAmelCase__ : str = UNetaDConditionModel(**a )
return model
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
torch.manual_seed(1 )
lowerCAmelCase__ : List[str] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.dummy_decoder
lowerCAmelCase__ : Optional[int] = self.dummy_text_proj
lowerCAmelCase__ : Any = self.dummy_text_encoder
lowerCAmelCase__ : Any = self.dummy_tokenizer
lowerCAmelCase__ : Any = self.dummy_super_res_first
lowerCAmelCase__ : Optional[int] = self.dummy_super_res_last
lowerCAmelCase__ : Dict = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCAmelCase__ : Optional[int] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _lowerCamelCase ( self : Any , a : Dict , a : List[str]=0 , a : List[str]=True ):
'''simple docstring'''
lowerCAmelCase__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Optional[int] = torch.manual_seed(a )
else:
lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a )
if pil_image:
lowerCAmelCase__ : Optional[int] = input_image * 0.5 + 0.5
lowerCAmelCase__ : Dict = input_image.clamp(0 , 1 )
lowerCAmelCase__ : List[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase__ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(a )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = 'cpu'
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[str] = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : str = pipe(**a )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : str = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Optional[int] = self.pipeline_class(**a )
lowerCAmelCase__ : int = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = pipe(**a )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : int = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : int = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCAmelCase__ : Optional[int] = pipe(**a )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Union[str, Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCAmelCase__ : str = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase__ : Union[str, Any] = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.device('cpu' )
class DummyScheduler:
    init_noise_sigma = 1
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase__ : List[Any] = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase__ : Any = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , decoder_latents=a , super_res_latents=a ).images
lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
lowerCAmelCase__ : Union[str, Any] = pipeline_inputs.pop('image' )
lowerCAmelCase__ : Union[str, Any] = pipe.image_encoder(a ).image_embeds
lowerCAmelCase__ : List[Any] = pipe(
**a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
lowerCAmelCase__ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase__ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
lowerCAmelCase__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
lowerCAmelCase__ : Tuple = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.float16 )
lowerCAmelCase__ : Union[str, Any] = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : List[str] = pipeline(
a , generator=a , output_type='np' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(a , a , 15 ) | 307 | 1 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class lowerCamelCase_ ( _UpperCAmelCase ):
'''simple docstring'''
a__ : int = """owlvit_text_model"""
def __init__( self , __lowercase=49_408 , __lowercase=512 , __lowercase=2_048 , __lowercase=12 , __lowercase=8 , __lowercase=16 , __lowercase="quick_gelu" , __lowercase=1E-5 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=1.0 , __lowercase=0 , __lowercase=49_406 , __lowercase=49_407 , **__lowercase , ) -> Optional[Any]:
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase)
__UpperCamelCase :Optional[int] = vocab_size
__UpperCamelCase :int = hidden_size
__UpperCamelCase :List[str] = intermediate_size
__UpperCamelCase :Dict = num_hidden_layers
__UpperCamelCase :str = num_attention_heads
__UpperCamelCase :List[Any] = max_position_embeddings
__UpperCamelCase :Union[str, Any] = hidden_act
__UpperCamelCase :Optional[int] = layer_norm_eps
__UpperCamelCase :Optional[Any] = attention_dropout
__UpperCamelCase :str = initializer_range
__UpperCamelCase :Any = initializer_factor
@classmethod
def UpperCamelCase__ ( cls , __lowercase , **__lowercase) -> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCAmelCase)
__UpperCamelCase , __UpperCamelCase :List[str] = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase)
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''') == "owlvit":
__UpperCamelCase :Any = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase)
class lowerCamelCase_ ( _UpperCAmelCase ):
'''simple docstring'''
a__ : Union[str, Any] = """owlvit_vision_model"""
def __init__( self , __lowercase=768 , __lowercase=3_072 , __lowercase=12 , __lowercase=12 , __lowercase=3 , __lowercase=768 , __lowercase=32 , __lowercase="quick_gelu" , __lowercase=1E-5 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=1.0 , **__lowercase , ) -> List[str]:
super().__init__(**__UpperCAmelCase)
__UpperCamelCase :Optional[int] = hidden_size
__UpperCamelCase :Any = intermediate_size
__UpperCamelCase :List[Any] = num_hidden_layers
__UpperCamelCase :str = num_attention_heads
__UpperCamelCase :List[Any] = num_channels
__UpperCamelCase :Tuple = image_size
__UpperCamelCase :Dict = patch_size
__UpperCamelCase :Any = hidden_act
__UpperCamelCase :Union[str, Any] = layer_norm_eps
__UpperCamelCase :int = attention_dropout
__UpperCamelCase :Any = initializer_range
__UpperCamelCase :Dict = initializer_factor
@classmethod
def UpperCamelCase__ ( cls , __lowercase , **__lowercase) -> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCAmelCase)
__UpperCamelCase , __UpperCamelCase :Dict = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase)
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''') == "owlvit":
__UpperCamelCase :int = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase)
class lowerCamelCase_ ( _UpperCAmelCase ):
'''simple docstring'''
a__ : Optional[int] = """owlvit"""
a__ : int = True
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=512 , __lowercase=2.65_92 , __lowercase=True , **__lowercase , ) -> List[str]:
super().__init__(**__UpperCAmelCase)
if text_config is None:
__UpperCamelCase :Any = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''')
if vision_config is None:
__UpperCamelCase :Optional[int] = {}
logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''')
__UpperCamelCase :Union[str, Any] = OwlViTTextConfig(**__UpperCAmelCase)
__UpperCamelCase :Dict = OwlViTVisionConfig(**__UpperCAmelCase)
__UpperCamelCase :int = projection_dim
__UpperCamelCase :Optional[int] = logit_scale_init_value
__UpperCamelCase :Optional[Any] = return_dict
__UpperCamelCase :Optional[int] = 1.0
@classmethod
def UpperCamelCase__ ( cls , __lowercase , **__lowercase) -> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCAmelCase)
__UpperCamelCase , __UpperCamelCase :str = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase)
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase)
@classmethod
def UpperCamelCase__ ( cls , __lowercase , __lowercase , **__lowercase) -> Union[str, Any]:
__UpperCamelCase :List[Any] = {}
__UpperCamelCase :str = text_config
__UpperCamelCase :Dict = vision_config
return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Union[str, Any] = copy.deepcopy(self.__dict__)
__UpperCamelCase :List[Any] = self.text_config.to_dict()
__UpperCamelCase :str = self.vision_config.to_dict()
__UpperCamelCase :Optional[Any] = self.__class__.model_type
return output
class lowerCamelCase_ ( _UpperCAmelCase ):
'''simple docstring'''
@property
def UpperCamelCase__ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
])
@property
def UpperCamelCase__ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
])
@property
def UpperCamelCase__ ( self) -> float:
return 1E-4
def UpperCamelCase__ ( self , __lowercase , __lowercase = -1 , __lowercase = -1 , __lowercase = None , ) -> Mapping[str, Any]:
__UpperCamelCase :List[str] = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , framework=__UpperCAmelCase)
__UpperCamelCase :Dict = super().generate_dummy_inputs(
processor.image_processor , batch_size=__UpperCAmelCase , framework=__UpperCAmelCase)
return {**text_input_dict, **image_input_dict}
@property
def UpperCamelCase__ ( self) -> int:
return 14
| 43 |
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    """simple docstring"""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 340 | 0 |
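# A defensive variant of the scraper above (a sketch, not the original code):
# Yahoo's markup changes often, so soup.find() can return None and the chained
# lookup would raise AttributeError. The CSS class is the same assumption the
# snippet above makes.
from typing import Optional
import requests
from bs4 import BeautifulSoup
def stock_price_safe(symbol: str = "AAPL") -> Optional[str]:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    if div is None:
        return None  # page layout changed or the request was blocked
    span = div.find("span")
    return span.text if span is not None else None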
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_distilbert_fast'''] = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_distilbert'''] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_distilbert'''] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_distilbert'''] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 367 |
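# The _LazyModule pattern above defers heavy imports until an attribute is
# first touched. A stand-alone sketch of the same idea with PEP 562 module
# __getattr__, meant to live in a package's __init__.py; the attribute and
# submodule names here are placeholders, not transformers internals.
import importlib
_LAZY_ATTRS = {"DistilBertModel": ".modeling_distilbert"}
def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")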
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case : Union[str, Any] = logging.get_logger(__name__)
snake_case : Tuple = torch.device('''cpu''')
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name: str):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")
    # compare outputs from both models
    expected_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], expected_logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
snake_case : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
snake_case : Optional[int] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 109 | 0 |
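# The conversion above boils down to renaming checkpoint keys in place. A tiny
# self-contained illustration of the same pop-and-reinsert pattern on a dummy
# state dict (the key names are made up for the example):
import torch
state_dict = {"network.0.pwconv.weight": torch.zeros(3)}
renames = [(k, k.replace(".pwconv", ".point_wise_conv")) for k in list(state_dict)]
for src, dest in renames:
    state_dict[dest] = state_dict.pop(src)
assert "network.0.point_wise_conv.weight" in state_dict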
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ""
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient
    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
| 23 |
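# The script above reports to Slack via slack_sdk. A minimal sketch of just
# that posting step; the token variable and channel are the same assumptions
# the script makes, so this only runs with a valid SLACK_API_TOKEN set.
import os
from slack_sdk import WebClient
client = WebClient(token=os.environ["SLACK_API_TOKEN"])
response = client.chat_postMessage(channel="#accelerate-ci-daily", text="CI report")
thread_ts = response.data["ts"]  # reuse this ts to reply in-thread, as the script does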
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    """simple docstring"""
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler
    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)
            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample
            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
| 23 | 1 |
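# The pipeline above alternates corrector (Langevin) and predictor (reverse
# SDE) updates. A toy, self-contained Langevin corrector that samples from
# N(0, 1), whose score function is s(x) = -x; everything here is illustrative,
# not diffusers API.
import torch
def langevin_sample(steps: int = 1000, step_size: float = 1e-2) -> torch.Tensor:
    x = torch.randn(10_000) * 3.0  # start far from the target distribution
    for _ in range(steps):
        score = -x  # score function of a standard normal
        noise = torch.randn_like(x)
        x = x + step_size * score + (2 * step_size) ** 0.5 * noise
    return x
samples = langevin_sample()
print(samples.mean().item(), samples.std().item())  # approach 0 and 1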
'''simple docstring'''
def check_bipartite_dfs(graph: dict) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)
    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 370 |
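# An equivalent breadth-first formulation (a sketch, not from the file above):
# colour each component level by level and fail on any same-colour edge.
from collections import deque
def check_bipartite_bfs(graph: dict) -> bool:
    color = {}
    for start in graph:
        if start in color:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in graph[v]:
                if u not in color:
                    color[u] = 1 - color[v]
                    queue.append(u)
                elif color[u] == color[v]:
                    return False
    return True
print(check_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}))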
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")
    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
    import doctest
    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)
if __name__ == "__main__":
    main()
| 299 | 0 |
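# A worked mini-example of the first-order entropy the function above prints,
# on a four-character string (illustrative only):
import math
from collections import Counter
text = "abab"
counts = Counter(text)  # Counter({'a': 2, 'b': 2})
total = sum(counts.values())
h = -sum((c / total) * math.log2(c / total) for c in counts.values())
print(f"{h:.1f}")  # 1.0 bit per symbol: two equiprobable characters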
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 109 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = 'Create a default config file for Accelerate with only a few flags set.'
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """simple docstring"""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
    config = {
        '''compute_environment''': '''LOCAL_MACHINE''',
        '''mixed_precision''': mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = '''MULTI_GPU'''
        else:
            config["distributed_type"] = '''NO'''
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = '''MULTI_XPU'''
        else:
            config["distributed_type"] = '''NO'''
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = '''MULTI_NPU'''
        else:
            config["distributed_type"] = '''NO'''
    else:
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = '''NO'''
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    """simple docstring"""
    parser = parser.add_parser('''default''' , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        '''--config_file''' , default=default_json_config_file , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , dest='''save_location''' , )
    parser.add_argument(
        '''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=str , help='''Whether or not to use mixed precision training. '''
        '''Choose between FP16 and BF16 (bfloat16) training. '''
        '''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    """simple docstring"""
    config_file = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(f"""accelerate configuration saved at {config_file}""" )
| 167 | 0 |
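# The command above writes a plain JSON file, so verifying the result is just
# a read-back. The path below is hypothetical; point it at whatever
# save_location you passed to write_basic_config.
import json
config_path = "default_config.json"  # hypothetical path
with open(config_path) as f:
    cfg = json.load(f)
print(cfg.get("compute_environment"), cfg.get("mixed_precision"), cfg.get("distributed_type"))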
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"""Task {task} not supported.""")
    print(f"""Building PyTorch model from configuration: {config}""")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(f"""Save tokenizer files to {pytorch_dump_path}""")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt', model_max_length=5_12)
    tokenizer.save_pretrained(pytorch_dump_path)
    print('Used relative position embeddings:', model.config.reset_position_index_per_cell)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 88 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88 | 1 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class __SCREAMING_SNAKE_CASE( unittest.TestCase ):
@require_torch
def lowerCAmelCase_ ( self: List[Any] ) -> Union[str, Any]:
audio_classifier = pipeline(
    task='zero-shot-audio-classification', model='hf-internal-testing/tiny-clap-htsat-unfused')
dataset = load_dataset('ashraq/esc50')
audio = dataset['train']['audio'][-1]['array']
output = audio_classifier(audio, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
self.assertEqual(
    nested_simplify(output), [{'score': 0.501, 'label': 'Sound of a dog'}, {'score': 0.499, 'label': 'Sound of vaccum cleaner'}])
@unittest.skip('No models are available in TF' )
def lowerCAmelCase_ ( self: Any ) -> Union[str, Any]:
pass
@slow
@require_torch
def lowerCAmelCase_ ( self: List[Any] ) -> Tuple:
audio_classifier = pipeline(
    task='zero-shot-audio-classification', model='laion/clap-htsat-unfused')
# This is an audio of a dog
dataset = load_dataset('ashraq/esc50')
audio = dataset['train']['audio'][-1]['array']
output = audio_classifier(audio, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
self.assertEqual(
    nested_simplify(output), [
        {'score': 0.999, 'label': 'Sound of a dog'},
        {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
    ])
output = audio_classifier([audio] * 5, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
self.assertEqual(
    nested_simplify(output), [
        [
            {'score': 0.999, 'label': 'Sound of a dog'},
            {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
        ],
    ]
    * 5)
output = audio_classifier(
    [audio] * 5, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'], batch_size=5)
self.assertEqual(
    nested_simplify(output), [
        [
            {'score': 0.999, 'label': 'Sound of a dog'},
            {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
        ],
    ]
    * 5)
@unittest.skip('No models are available in TF' )
def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]:
pass
| 307 |
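# Zero-shot audio classification of this kind scores one audio embedding
# against one text embedding per candidate label and softmaxes the
# similarities. A toy numpy version of that scoring step; the embeddings are
# random stand-ins, not CLAP outputs.
import numpy as np
rng = np.random.default_rng(0)
audio_emb = rng.normal(size=512)
label_embs = rng.normal(size=(2, 512))  # one row per candidate label
def cosine(a, b):
    return a @ b / (np.linalg.norm(a) * np.linalg.norm(b))
sims = np.array([cosine(audio_emb, t) for t in label_embs])
probs = np.exp(sims) / np.exp(sims).sum()  # softmax over labels
print(dict(zip(["Sound of a dog", "Sound of vaccum cleaner"], probs.round(3))))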
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __SCREAMING_SNAKE_CASE:
def __init__( self: int , UpperCamelCase: List[str] , UpperCamelCase: str=13 , UpperCamelCase: int=7 , UpperCamelCase: Any=True , UpperCamelCase: Dict=True , UpperCamelCase: Dict=False , UpperCamelCase: Optional[int]=True , UpperCamelCase: Dict=99 , UpperCamelCase: Dict=32 , UpperCamelCase: Optional[Any]=5 , UpperCamelCase: Union[str, Any]=4 , UpperCamelCase: List[str]=37 , UpperCamelCase: List[str]="gelu" , UpperCamelCase: Optional[Any]=0.1 , UpperCamelCase: Union[str, Any]=0.1 , UpperCamelCase: Union[str, Any]=5_12 , UpperCamelCase: str=16 , UpperCamelCase: int=2 , UpperCamelCase: Optional[int]=0.02 , UpperCamelCase: Union[str, Any]=3 , UpperCamelCase: Dict=4 , UpperCamelCase: List[str]=None , ) -> List[str]:
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = seq_length
snake_case__ = is_training
snake_case__ = use_input_mask
snake_case__ = use_token_type_ids
snake_case__ = use_labels
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = type_vocab_size
snake_case__ = type_sequence_label_size
snake_case__ = initializer_range
snake_case__ = num_labels
snake_case__ = num_choices
snake_case__ = scope
def lowerCAmelCase_ ( self: List[str] ) -> Dict:
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ = None
if self.use_input_mask:
snake_case__ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ = None
if self.use_token_type_ids:
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ = None
snake_case__ = None
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self: Optional[Any] ) -> Union[str, Any]:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: Dict , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Any , UpperCamelCase: List[Any] , UpperCamelCase: str ) -> Dict:
snake_case__ = LlamaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase )
snake_case__ = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[Any] , ) -> str:
snake_case__ = True
snake_case__ = LlamaModel(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , )
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , )
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Any , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Any , UpperCamelCase: int , UpperCamelCase: Optional[Any] , ) -> Any:
snake_case__ = LlamaForCausalLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: int , UpperCamelCase: str , UpperCamelCase: List[str] , ) -> Union[str, Any]:
snake_case__ = True
snake_case__ = True
snake_case__ = LlamaForCausalLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
# first forward pass
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , use_cache=UpperCamelCase , )
snake_case__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case__ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , output_hidden_states=UpperCamelCase , )['hidden_states'][0]
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , output_hidden_states=UpperCamelCase , )['hidden_states'][0]
# select random slice
snake_case__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) )
def lowerCAmelCase_ ( self: int ) -> Dict:
snake_case__ = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE( a_ , a_ , a_ , unittest.TestCase ):
_UpperCAmelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_UpperCAmelCase = (LlamaForCausalLM,) if is_torch_available() else ()
_UpperCAmelCase = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
def lowerCAmelCase_ ( self: int ) -> int:
snake_case__ = LlamaModelTester(self )
snake_case__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self: int ) -> int:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCAmelCase_ ( self: Optional[Any] ) -> str:
snake_case__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case__ = type
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCAmelCase_ ( self: List[Any] ) -> Union[str, Any]:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = 3
snake_case__ = input_dict['input_ids']
snake_case__ = input_ids.ne(1 ).to(UpperCamelCase )
snake_case__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case__ = LlamaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase_ ( self: str ) -> Union[str, Any]:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = 3
snake_case__ = 'single_label_classification'
snake_case__ = input_dict['input_ids']
snake_case__ = input_ids.ne(1 ).to(UpperCamelCase )
snake_case__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case__ = LlamaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase_ ( self: Dict ) -> int:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = 3
snake_case__ = 'multi_label_classification'
snake_case__ = input_dict['input_ids']
snake_case__ = input_ids.ne(1 ).to(UpperCamelCase )
snake_case__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case__ = LlamaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def lowerCAmelCase_ ( self: Dict ) -> Any:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Optional[Any] ) -> List[str]:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = ids_tensor([1, 10] , config.vocab_size )
snake_case__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ = LlamaModel(UpperCamelCase )
original_model.to(UpperCamelCase )
original_model.eval()
snake_case__ = original_model(UpperCamelCase ).last_hidden_state
snake_case__ = original_model(UpperCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ = {'type': scaling_type, 'factor': 10.0}
snake_case__ = LlamaModel(UpperCamelCase )
scaled_model.to(UpperCamelCase )
scaled_model.eval()
snake_case__ = scaled_model(UpperCamelCase ).last_hidden_state
snake_case__ = scaled_model(UpperCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) )
@require_torch
class __SCREAMING_SNAKE_CASE( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self: Union[str, Any] ) -> str:
snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
snake_case__ = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
snake_case__ = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
snake_case__ = model(torch.tensor(UpperCamelCase ) )
# Expected mean on dim = -1
snake_case__ = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self: int ) -> List[Any]:
snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
snake_case__ = model(torch.tensor(UpperCamelCase ) )
# Expected mean on dim = -1
snake_case__ = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def lowerCAmelCase_ ( self: List[str] ) -> Tuple:
snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
snake_case__ = model(torch.tensor(UpperCamelCase ) )
snake_case__ = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# fmt: off
snake_case__ = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Model is currently gated' )
@slow
def lowerCAmelCase_ ( self: Tuple ) -> Optional[int]:
snake_case__ = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
snake_case__ = 'Simply put, the theory of relativity states that '
snake_case__ = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
snake_case__ = tokenizer.encode(UpperCamelCase , return_tensors='pt' )
snake_case__ = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=UpperCamelCase )
# greedy generation outputs
snake_case__ = model.generate(UpperCamelCase , max_new_tokens=64 , top_p=UpperCamelCase , temperature=1 , do_sample=UpperCamelCase )
snake_case__ = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
| 307 | 1 |
def permute(nums):
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output
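# Editor's sketch (not in the original file): both routines above should agree
# with itertools.permutations up to ordering. `permute` rotates the list and
# recurses; `permute2` swaps in place and backtracks.
def _check_against_stdlib():
    import itertools

    nums = [1, 2, 3]
    expected = sorted(itertools.permutations(nums))
    assert sorted(tuple(p) for p in permute(list(nums))) == expected
    assert sorted(tuple(p) for p in permute2(list(nums))) == expected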
if __name__ == "__main__":
    import doctest

    # use res to print the data from the permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
| 363 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->None:
'''simple docstring'''
warnings.warn(
'''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''' , FutureWarning , )
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
| 65 | 0 |
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """simple docstring"""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """simple docstring"""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """simple docstring"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
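# Editor's usage sketch (hypothetical `Menu` class, not part of the original
# module): `register` rebuilds a class with the KeyHandler metaclass so that
# every method marked with a key code is dispatched through `key_handler`.
# The KEYMAP entries used here are assumptions about the sibling keymap module.
@register
class Menu:
    @mark(KEYMAP["up"])
    def move_up(cls):
        return "up"

    @mark(KEYMAP["down"])
    def move_down(cls):
        return "down"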
| 147 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
A: Any = datasets.utils.logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ ( datasets.BuilderConfig ):
__lowerCAmelCase : Optional[datasets.Features] = None
__lowerCAmelCase : str = "utf-8"
__lowerCAmelCase : Optional[str] = None
__lowerCAmelCase : Optional[str] = None
__lowerCAmelCase : bool = True # deprecated
__lowerCAmelCase : Optional[int] = None # deprecated
__lowerCAmelCase : int = 10 << 20 # 10MB
__lowerCAmelCase : Optional[bool] = None
class SCREAMING_SNAKE_CASE__ ( datasets.ArrowBasedBuilder ):
__lowerCAmelCase : int = JsonConfig
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
UpperCAmelCase : Any = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
UpperCAmelCase : List[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_SCREAMING_SNAKE_CASE , (str, list, tuple) ):
UpperCAmelCase : int = data_files
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Tuple = [files]
UpperCAmelCase : List[str] = [dl_manager.iter_files(_SCREAMING_SNAKE_CASE ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
UpperCAmelCase : List[str] = []
for split_name, files in data_files.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Dict = [files]
UpperCAmelCase : str = [dl_manager.iter_files(_SCREAMING_SNAKE_CASE ) for file in files]
splits.append(datasets.SplitGenerator(name=_SCREAMING_SNAKE_CASE , gen_kwargs={"""files""": files} ) )
return splits
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
UpperCAmelCase : Tuple = self.config.features.arrow_schema.field(_SCREAMING_SNAKE_CASE ).type
UpperCAmelCase : int = pa_table.append_column(_SCREAMING_SNAKE_CASE , pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=_SCREAMING_SNAKE_CASE ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase : Any = table_cast(_SCREAMING_SNAKE_CASE , self.config.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(_SCREAMING_SNAKE_CASE ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_SCREAMING_SNAKE_CASE , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCAmelCase : Union[str, Any] = json.load(_SCREAMING_SNAKE_CASE )
# We keep only the field we are interested in
UpperCAmelCase : Dict = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
UpperCAmelCase : Optional[Any] = set().union(*[row.keys() for row in dataset] )
UpperCAmelCase : Union[str, Any] = {col: [row.get(_SCREAMING_SNAKE_CASE ) for row in dataset] for col in keys}
else:
UpperCAmelCase : Optional[Any] = dataset
UpperCAmelCase : List[str] = pa.Table.from_pydict(_SCREAMING_SNAKE_CASE )
yield file_idx, self._cast_table(_SCREAMING_SNAKE_CASE )
# If the file has one json object per line
else:
with open(_SCREAMING_SNAKE_CASE , """rb""" ) as f:
UpperCAmelCase : Union[str, Any] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
UpperCAmelCase : int = max(self.config.chunksize // 32 , 16 << 10 )
UpperCAmelCase : Dict = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
UpperCAmelCase : Tuple = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_SCREAMING_SNAKE_CASE )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
UpperCAmelCase : Tuple = batch.decode(self.config.encoding , errors=_SCREAMING_SNAKE_CASE ).encode("""utf-8""" )
try:
while True:
try:
UpperCAmelCase : int = paj.read_json(
io.BytesIO(_SCREAMING_SNAKE_CASE ) , read_options=paj.ReadOptions(block_size=_SCREAMING_SNAKE_CASE ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_SCREAMING_SNAKE_CASE , pa.ArrowInvalid )
and "straddling" not in str(_SCREAMING_SNAKE_CASE )
or block_size > len(_SCREAMING_SNAKE_CASE )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"Batch of {len(_SCREAMING_SNAKE_CASE )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}." )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_SCREAMING_SNAKE_CASE , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCAmelCase : str = json.load(_SCREAMING_SNAKE_CASE )
except json.JSONDecodeError:
logger.error(F"Failed to read file '{file}' with error {type(_SCREAMING_SNAKE_CASE )}: {e}" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # list is the only sequence type supported in JSON
try:
UpperCAmelCase : Dict = set().union(*[row.keys() for row in dataset] )
UpperCAmelCase : Union[str, Any] = {col: [row.get(_SCREAMING_SNAKE_CASE ) for row in dataset] for col in keys}
UpperCAmelCase : Dict = pa.Table.from_pydict(_SCREAMING_SNAKE_CASE )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"Failed to read file '{file}' with error {type(_SCREAMING_SNAKE_CASE )}: {e}" )
raise ValueError(F"Not able to read records in the JSON file at {file}." ) from None
yield file_idx, self._cast_table(_SCREAMING_SNAKE_CASE )
break
else:
logger.error(F"Failed to read file '{file}' with error {type(_SCREAMING_SNAKE_CASE )}: {e}" )
raise ValueError(
F"Not able to read records in the JSON file at {file}. "
F"You should probably indicate the field of the JSON file containing your records. "
F"This JSON file contain the following fields: {str(list(dataset.keys() ) )}. "
F"Select the correct one and provide it as `field='XXX'` to the dataset loading method. " ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_SCREAMING_SNAKE_CASE )
batch_idx += 1
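# Editor's sketch (stand-alone helper, not part of the original builder): the
# except-branch above doubles `block_size` whenever pyarrow reports a JSON
# object straddling a block boundary. The same retry loop in miniature,
# reusing this module's `io`, `pa`, and `paj` imports.
def _read_json_growing_block(raw_bytes: bytes, block_size: int = 16 << 10) -> pa.Table:
    while True:
        try:
            return paj.read_json(
                io.BytesIO(raw_bytes), read_options=paj.ReadOptions(block_size=block_size)
            )
        except pa.ArrowInvalid as err:
            if "straddling" not in str(err) or block_size > len(raw_bytes):
                raise
            block_size *= 2  # retry with a bigger block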
| 109 | 0 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE__ ( nn.Module):
def __init__( self , A_ = 16 , A_ = 88 , A_ = None , A_ = 1 , A_ = 0.0 , A_ = 32 , A_ = None , A_ = False , A_ = None , A_ = None , A_ = "geglu" , A_ = None , )-> Optional[Any]:
'''simple docstring'''
super().__init__()
UpperCamelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=lowerCAmelCase_ , attention_head_dim=lowerCAmelCase_ , in_channels=lowerCAmelCase_ , num_layers=lowerCAmelCase_ , dropout=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , cross_attention_dim=lowerCAmelCase_ , attention_bias=lowerCAmelCase_ , sample_size=lowerCAmelCase_ , num_vector_embeds=lowerCAmelCase_ , activation_fn=lowerCAmelCase_ , num_embeds_ada_norm=lowerCAmelCase_ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
UpperCamelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
UpperCamelCase = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
UpperCamelCase = [1, 0]
def UpperCAmelCase_ ( self , A_ , A_ , A_=None , A_=None , A_=None , A_ = True , )-> Any:
'''simple docstring'''
UpperCamelCase = hidden_states
UpperCamelCase = []
UpperCamelCase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
UpperCamelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
UpperCamelCase = self.transformer_index_for_condition[i]
UpperCamelCase = self.transformers[transformer_index](
lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , timestep=lowerCAmelCase_ , cross_attention_kwargs=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
UpperCamelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
UpperCamelCase = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=lowerCAmelCase_ )
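# Editor's sketch (toy tensors; not part of the original module): the forward
# pass above blends the two transformers' output deltas with `mix_ratio` and
# then adds the residual input back in.
def _mix_ratio_example():
    import torch

    delta_a, delta_b = torch.randn(2, 4), torch.randn(2, 4)
    input_states, mix_ratio = torch.randn(2, 4), 0.5
    return delta_a * mix_ratio + delta_b * (1 - mix_ratio) + input_states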
| 365 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
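# Editor's sketch (simplified, hypothetical helper): `_LazyModule` above defers
# the imports declared in `_import_structure` until an attribute is first
# accessed. The same idea expressed as a PEP 562 module-level __getattr__:
import importlib


def _lazy_getattr_factory(import_structure, package):
    def __getattr__(name):
        for submodule, symbols in import_structure.items():
            if name in symbols:
                module = importlib.import_module("." + submodule, package)
                return getattr(module, name)
        raise AttributeError(f"module {package!r} has no attribute {name!r}")

    return __getattr__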
| 251 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = '''markuplm'''
def __init__( self : List[str] , __UpperCAmelCase : Optional[Any]=30522 , __UpperCAmelCase : str=768 , __UpperCAmelCase : Optional[int]=12 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : Any=3072 , __UpperCAmelCase : Dict="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : str=512 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : str=0.02 , __UpperCAmelCase : str=1E-12 , __UpperCAmelCase : str=0 , __UpperCAmelCase : int=0 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : Optional[int]=256 , __UpperCAmelCase : Any=1024 , __UpperCAmelCase : Union[str, Any]=216 , __UpperCAmelCase : Any=1001 , __UpperCAmelCase : int=32 , __UpperCAmelCase : List[str]=50 , __UpperCAmelCase : str="absolute" , __UpperCAmelCase : str=True , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = hidden_act
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = initializer_range
_A = layer_norm_eps
_A = position_embedding_type
_A = use_cache
_A = classifier_dropout
# additional properties
_A = max_depth
_A = max_xpath_tag_unit_embeddings
_A = max_xpath_subs_unit_embeddings
_A = tag_pad_id
_A = subs_pad_id
_A = xpath_unit_hidden_size
| 79 |
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =["torch", "scipy"]
def __init__( self , *_A , **_A ) -> Tuple:
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def _UpperCamelCase ( cls , *_A , **_A ) -> Any:
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def _UpperCamelCase ( cls , *_A , **_A ) -> Tuple:
requires_backends(cls , ['''torch''', '''scipy'''] )
| 299 | 0 |
"""simple docstring"""
import os
from pathlib import Path
def write_model_card(model_card_dir: str, src_lang: str, tgt_lang: str) -> None:
"""simple docstring"""
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
    pair = f"""{src_lang}-{tgt_lang}"""
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, 'README.md')
    print(f"""Generating {path}""")
    with open(path, 'w', encoding='utf-8') as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    prefix, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 369 |
"""simple docstring"""
def is_isogram(string: str) -> bool:
    """simple docstring"""
    if not all(x.isalpha() for x in string):
        raise ValueError('String must only contain alphabetic characters.')
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
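# Editor's examples (added for illustration): an isogram has no repeated
# letters, case-insensitively.
assert is_isogram("Uncopyrightable")
assert not is_isogram("allowance")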
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
print(f"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 149 | 0 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
@register_to_config
def __init__( self : List[str] , *,
UpperCamelCase__ : int = 4 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int , UpperCamelCase__ : str , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__magic_name__ = nn.Parameter(torch.zeros(UpperCamelCase__ ) )
# parameters for additional clip time embeddings
__magic_name__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
# parameters for encoder hidden states
__magic_name__ = clip_extra_context_tokens
__magic_name__ = nn.Linear(
UpperCamelCase__ , self.clip_extra_context_tokens * cross_attention_dim )
__magic_name__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = nn.LayerNorm(UpperCamelCase__ )
def _lowercase ( self : Optional[int] , *, UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : List[str] ) -> Optional[int]:
"""simple docstring"""
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
__magic_name__ = image_embeddings.shape[0]
__magic_name__ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
__magic_name__ = classifier_free_guidance_embeddings.expand(
UpperCamelCase__ , -1 )
__magic_name__ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
__magic_name__ = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
__magic_name__ = self.embedding_proj(UpperCamelCase__ )
__magic_name__ = self.clip_image_embeddings_project_to_time_embeddings(UpperCamelCase__ )
__magic_name__ = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
__magic_name__ = self.clip_extra_context_tokens_proj(UpperCamelCase__ )
__magic_name__ = clip_extra_context_tokens.reshape(UpperCamelCase__ , -1 , self.clip_extra_context_tokens )
__magic_name__ = clip_extra_context_tokens.permute(0 , 2 , 1 )
__magic_name__ = self.encoder_hidden_states_proj(UpperCamelCase__ )
__magic_name__ = self.text_encoder_hidden_states_norm(UpperCamelCase__ )
__magic_name__ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
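# Editor's sketch (toy shapes; not part of the original module): for
# classifier-free guidance the block above prepends a learned unconditional
# embedding to the image-embedding batch before projecting.
def _cfg_prepend_example():
    image_embeddings = torch.randn(2, 768)  # (batch, dim)
    uncond = torch.zeros(768)  # stands in for the learned CFG embedding
    uncond_batch = uncond.unsqueeze(0).expand(image_embeddings.shape[0], -1)
    return torch.cat([uncond_batch, image_embeddings], dim=0)  # (2 * batch, dim)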
| 88 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 | 1 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path: str):
    '''simple docstring'''
    sd = torch.load(checkpoint_path, map_location='cpu')
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='cpu')['model']
    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV into separate Q, K, V projections
            q_name = key.replace('.qkv_proj.', '.q_proj.')
            k_name = key.replace('.qkv_proj.', '.k_proj.')
            v_name = key.replace('.qkv_proj.', '.v_proj.')
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight in K,V,Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str, config=None) -> None:
    '''simple docstring'''
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
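# Editor's sketch (toy tensors; independent of any real checkpoint): the
# conversion above slices each fused `qkv_proj` weight into three equal chunks
# along dim 0 with torch.split.
def _split_qkv_example():
    fused = torch.randn(3 * 8, 8)  # toy fused QKV weight for hidden size 8
    q, k, v = torch.split(fused, fused.shape[0] // 3, dim=0)
    assert q.shape == k.shape == v.shape == (8, 8)
    return q, k, v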
| 295 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCAmelCase_ = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
UpperCAmelCase_ = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def lowerCamelCase__ ( ) -> Tuple:
'''simple docstring'''
_snake_case = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
_snake_case = bs[:]
_snake_case = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCamelCase__ )
cs.append(2**8 + n )
n += 1
_snake_case = [chr(UpperCamelCase__ ) for n in cs]
return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) )
def lowerCamelCase__ ( UpperCamelCase__ : Tuple ) -> int:
'''simple docstring'''
_snake_case = set()
_snake_case = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_snake_case = char
return pairs
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="replace" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
super().__init__(
errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , **lowerCAmelCase_ , )
with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle:
_snake_case = json.load(lowerCAmelCase_ )
_snake_case = {v: k for k, v in self.encoder.items()}
_snake_case = errors # how to handle errors in decoding
_snake_case = bytes_to_unicode()
_snake_case = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase_ , encoding='utf-8' ) as merges_handle:
_snake_case = merges_handle.read().split('\n' )[1:-1]
_snake_case = [tuple(merge.split() ) for merge in bpe_merges]
_snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_snake_case = {}
_snake_case = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_snake_case = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def lowerCAmelCase ( self ) -> Any:
return len(self.encoder )
def lowerCAmelCase ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
if token in self.cache:
return self.cache[token]
_snake_case = tuple(lowerCAmelCase_ )
_snake_case = get_pairs(lowerCAmelCase_ )
if not pairs:
return token
while True:
_snake_case = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_snake_case , _snake_case = bigram
_snake_case = []
_snake_case = 0
while i < len(lowerCAmelCase_ ):
try:
_snake_case = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_snake_case = j
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_snake_case = tuple(lowerCAmelCase_ )
_snake_case = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_snake_case = get_pairs(lowerCAmelCase_ )
_snake_case = ' '.join(lowerCAmelCase_ )
_snake_case = word
return word
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
_snake_case = []
for token in re.findall(self.pat , lowerCAmelCase_ ):
_snake_case = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase_ ).split(' ' ) )
return bpe_tokens
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
return self.decoder.get(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
_snake_case = ''.join(lowerCAmelCase_ )
_snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_snake_case = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '\n' )
_snake_case = 0
with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
_snake_case = token_index
writer.write(' '.join(lowerCAmelCase_ ) + '\n' )
index += 1
return vocab_file, merge_file
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_snake_case = [self.cls_token_id]
_snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> str:
_snake_case = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase_ ) > 0 and not text[0].isspace()):
_snake_case = ' ' + text
return (text, kwargs)
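# Editor's sketch (toy example, independent of the real vocab): one BPE merge
# step replaces every adjacent occurrence of the chosen pair, which is what
# the inner while-loop of the `bpe` method above implements.
def _merge_pair_example(word, pair):
    first, second = pair
    out, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
            out.append(first + second)
            i += 2
        else:
            out.append(word[i])
            i += 1
    return tuple(out)


assert _merge_pair_example(("l", "o", "w", "e", "r"), ("l", "o")) == ("lo", "w", "e", "r")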
| 295 | 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Any = '''conditional_detr'''
__lowercase : str = ['''past_key_values''']
__lowercase : Union[str, Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=3 , lowerCAmelCase__=3_0_0 , lowerCAmelCase__=6 , lowerCAmelCase__=2_0_4_8 , lowerCAmelCase__=8 , lowerCAmelCase__=6 , lowerCAmelCase__=2_0_4_8 , lowerCAmelCase__=8 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=True , lowerCAmelCase__="relu" , lowerCAmelCase__=2_5_6 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1.0 , lowerCAmelCase__=False , lowerCAmelCase__="sine" , lowerCAmelCase__="resnet50" , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=2 , lowerCAmelCase__=5 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=1 , lowerCAmelCase__=2 , lowerCAmelCase__=5 , lowerCAmelCase__=2 , lowerCAmelCase__=0.25 , **lowerCAmelCase__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""")
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""])
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = backbone_config.get("""model_type""")
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
__SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = use_timm_backbone
__SCREAMING_SNAKE_CASE = backbone_config
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = num_queries
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = init_xavier_std
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = decoder_layerdrop
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = auxiliary_loss
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = backbone
__SCREAMING_SNAKE_CASE = use_pretrained_backbone
__SCREAMING_SNAKE_CASE = dilation
# Hungarian matcher
__SCREAMING_SNAKE_CASE = class_cost
__SCREAMING_SNAKE_CASE = bbox_cost
__SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
__SCREAMING_SNAKE_CASE = mask_loss_coefficient
__SCREAMING_SNAKE_CASE = dice_loss_coefficient
__SCREAMING_SNAKE_CASE = cls_loss_coefficient
__SCREAMING_SNAKE_CASE = bbox_loss_coefficient
__SCREAMING_SNAKE_CASE = giou_loss_coefficient
__SCREAMING_SNAKE_CASE = focal_alpha
super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__)
@property
def snake_case_ ( self):
return self.encoder_attention_heads
@property
def snake_case_ ( self):
return self.d_model
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
__SCREAMING_SNAKE_CASE = self.backbone_config.to_dict()
__SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : List[Any] = version.parse('''1.11''' )
@property
def snake_case_ ( self):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
])
@property
def snake_case_ ( self):
return 1E-5
@property
def snake_case_ ( self):
return 1_2
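# Editor's sketch (stand-alone, hypothetical class): the `attribute_map` above
# lets standard config names alias model-specific ones, so `hidden_size`
# resolves to `d_model`. A minimal version of the same aliasing idea:
class _AliasedConfigExample:
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self, d_model: int = 256):
        self.d_model = d_model

    def __getattr__(self, name):
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)


assert _AliasedConfigExample(128).hidden_size == 128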
| 100 |
def jaro_winkler(str_a: str, str_b: str) -> float:
    '''simple docstring'''
    def get_matched_characters(_str_a: str, _str_b: str) -> str:
        matched = []
        limit = min(len(_str_a), len(_str_b)) // 2
        for i, l in enumerate(_str_a):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str_b)))
            if l in _str_b[left:right]:
                matched.append(l)
                _str_b = f"{_str_b[0:_str_b.index(l)]} {_str_b[_str_b.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(str_a, str_b)
    matching_b = get_matched_characters(str_b, str_a)
    match_count = len(matching_a)

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_a)
                + match_count / len(str_b)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(str_a[:4], str_b[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
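# Editor's example (classic reference pair for Jaro-Winkler; the expected
# value follows from the standard definition with scaling factor 0.1):
assert abs(jaro_winkler("martha", "marhta") - 0.9611) < 1e-3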
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 65 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
__lowerCAmelCase : Optional[Any] = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Tuple = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = 'sgugger/tiny-distilbert-classification'
__lowerCAmelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , only_pretrain_model=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[int] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
__lowerCAmelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
__lowerCAmelCase : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : Any = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : List[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
__lowerCAmelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = 'patrickvonplaten/t5-tiny-random'
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
__lowerCAmelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[int] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , save_to_csv=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , 'env.csv' ) , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Tuple = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
benchmark.run()
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'env.csv' ) ).exists() )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(_SCREAMING_SNAKE_CASE ):
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'sequential' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'cumulative' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'current' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_SCREAMING_SNAKE_CASE , 'log.txt' ) , log_print=_SCREAMING_SNAKE_CASE , trace_memory_line_by_line=_SCREAMING_SNAKE_CASE , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'log.txt' ) ).exists() )
| 369 |
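# Hedged, standalone version of the benchmark pattern the tests above exercise
# (model id, sequence length and batch size mirror the tests; requires TensorFlow).
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(benchmark_args).run()
print(results.time_inference_result)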
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , embedding_size=16 , hidden_size=36 , num_hidden_layers=6 , num_hidden_groups=6 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained('albert-base-v2')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
| 182 | 0 |
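# Hedged script form of the integration check above (checkpoint id and the
# expected shape come from the test itself; requires torch and network access).
import torch
from transformers import AlbertModel

model = AlbertModel.from_pretrained("albert-base-v2")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
with torch.no_grad():
    output = model(input_ids).last_hidden_state
assert output.shape == (1, 11, 768)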
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_herbert_fast"""] = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
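# Hedged illustration of what the lazy module above defers: the submodule is
# only imported when its attribute is first accessed (standard transformers
# package layout assumed).
from transformers.models import herbert

tokenizer_cls = herbert.HerbertTokenizer  # resolved lazily on first access
print(tokenizer_cls.__name__)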
| 75 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    '''simple docstring'''
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BlipImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')
    def __init__(self, image_processor, tokenizer):
        '''simple docstring'''
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__( self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ):
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
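# Hedged usage sketch for the processor above (BlipProcessor is the public
# entry point; the checkpoint name is illustrative).
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
inputs = processor(images=Image.new("RGB", (384, 384)), text="a photo of", return_tensors="pt")
print(list(inputs.keys()))  # pixel_values plus the tokenizer outputs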
| 251 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    '''simple docstring'''
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    '''simple docstring'''
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f'''Mean Absolute Error : {mean_absolute_error(y_test, predictions)}''')
    print(f'''Mean Square Error : {mean_squared_error(y_test, predictions)}''')
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
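    # Smoke test for the helpers above on synthetic data (a sketch: checks the
    # prediction shape only, no claim about model quality).
    rng = np.random.default_rng(0)
    toy_features = rng.normal(size=(100, 4))
    toy_target = toy_features @ np.array([1.0, -2.0, 0.5, 0.0]) + rng.normal(scale=0.1, size=100)
    toy_predictions = xgboost(toy_features, toy_target, toy_features)
    assert toy_predictions.shape == (100, 1)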
| 192 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''')
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.']):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    '''simple docstring'''
    name = full_name.split('adaptor.')[-1]
    items = name.split('.')
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
                adapter.proj_layer_norm.bias.data = value
                logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
                adapter.proj.bias.data = value
                logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
                adapter.proj.weight.data = value
                logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''')
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''')
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f'''Adapter layer {layer_id} weight was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path, encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size, adapter_stride, decoder_start_token_id, encoder_output_dim, ):
    '''simple docstring'''
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path, add_adapter=add_adapter, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim, )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            'config_yaml': config_yaml_path,
            'data': '/'.join(dict_path.split('/')[:-1]),
            'w2v_path': checkpoint_path,
            'load_pretrained_decoder_from': None,
        }, )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''')
    logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''')
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'mbart50'
    config['feature_extractor_type'] = 'wav2vec2'
    config['decoder_start_token_id'] = tokenizer.eos_token_id
    config['forced_bos_token_id'] = 25_00_04
    config['forced_eos_token_id'] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
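    # Example invocation (a sketch: the script name and every path below are
    # illustrative placeholders; the flags mirror the argparse definitions above):
    #   python convert_wav2vec2_mbart_checkpoint.py \
    #       --checkpoint_path /path/to/fairseq_checkpoint.pt \
    #       --dict_path /path/to/dict \
    #       --config_yaml_path /path/to/config.yaml \
    #       --pytorch_dump_folder_path ./wav2vec2-mbart50-converted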
| 192 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", )
    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """simple docstring"""
    pass
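# Standalone replay of the toy BPE from setUp() above (a sketch; the expected
# tokens and ids come from the test itself): with those merges, "lower"
# splits into ["low", "er</w>"] with ids [14, 15].
if __name__ == "__main__":
    import tempfile

    tokens = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>",
              "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>"]
    merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
    with tempfile.TemporaryDirectory() as tmp:
        vocab_path = os.path.join(tmp, "vocab.json")
        merges_path = os.path.join(tmp, "merges.txt")
        with open(vocab_path, "w") as fp:
            fp.write(json.dumps(dict(zip(tokens, range(len(tokens))))))
        with open(merges_path, "w") as fp:
            fp.write("\n".join(merges))
        tok = OpenAIGPTTokenizer(vocab_path, merges_path)
        assert tok.tokenize("lower") == ["low", "er</w>"]
        assert tok.convert_tokens_to_ids(["low", "er</w>", "<unk>"]) == [14, 15, 20]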
| 344 |
class Node:
    """simple docstring"""

    def __init__(self, data: int, previous=None, next_node=None):
        '''simple docstring'''
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        '''simple docstring'''
        return F"{self.data}"

    def get_data(self) -> int:
        '''simple docstring'''
        return self.data

    def get_next(self):
        '''simple docstring'''
        return self.next

    def get_previous(self):
        '''simple docstring'''
        return self.previous
class LinkedListIterator:
    """simple docstring"""

    def __init__(self, head):
        '''simple docstring'''
        self.current = head

    def __iter__(self):
        '''simple docstring'''
        return self

    def __next__(self):
        '''simple docstring'''
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    """simple docstring"""

    def __init__(self):
        '''simple docstring'''
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        '''simple docstring'''
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        '''simple docstring'''
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        '''simple docstring'''
        return LinkedListIterator(self.head)

    def get_head_data(self):
        '''simple docstring'''
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        '''simple docstring'''
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        '''simple docstring'''
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        '''simple docstring'''
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        '''simple docstring'''
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        '''simple docstring'''
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        '''simple docstring'''
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        '''simple docstring'''
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        '''simple docstring'''
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        '''simple docstring'''
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        '''simple docstring'''
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        '''simple docstring'''
        return self.head is None
def lowerCAmelCase_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 149 | 0 |
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__lowerCAmelCase : List[str] =logging.get_logger(__name__) # pylint: disable=invalid-name
__lowerCAmelCase : Union[str, Any] =2_5_6
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]
    def __init__(self, notes_encoder, continuous_encoder, decoder, scheduler, melgan, ):
        """simple docstring"""
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1E-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
        self.register_modules(
            notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """simple docstring"""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """simple docstring"""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        """simple docstring"""
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask)
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask)
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self, encodings_and_masks, input_tokens, noise_time):
        """simple docstring"""
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps)
        return logits
@torch.no_grad()
    def __call__( self , input_tokens , generator = None , num_inference_steps = 100 , return_dict = True , output_type = "numpy" , callback = None , callback_steps = 1 , ):
"""simple docstring"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(__lowerCAmelCase )}.' )
lowercase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
lowercase = np.zeros([1, 0, self.n_dims] , np.floataa )
lowercase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__lowerCAmelCase , device=self.device )
for i, encoder_input_tokens in enumerate(__lowerCAmelCase ):
if i == 0:
lowercase = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
lowercase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__lowerCAmelCase , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowercase = ones
lowercase = self.scale_features(
__lowerCAmelCase , output_range=[-1.0, 1.0] , clip=__lowerCAmelCase )
lowercase = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=__lowerCAmelCase , continuous_mask=__lowerCAmelCase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowercase = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=__lowerCAmelCase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(__lowerCAmelCase )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase = self.decode(
encodings_and_masks=__lowerCAmelCase , input_tokens=__lowerCAmelCase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowercase = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
lowercase = self.scale_to_features(__lowerCAmelCase , input_range=[-1.0, 1.0] )
lowercase = mel[:1]
lowercase = mel.cpu().float().numpy()
lowercase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowerCAmelCase , __lowerCAmelCase )
logger.info("""Generated segment""" , __lowerCAmelCase )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" )
if output_type == "numpy":
lowercase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
lowercase = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=__lowerCAmelCase )
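# Sanity sketch (not from the source): scale_features followed by
# scale_to_features over the same range is the identity map; replayed here
# standalone with the pipeline's min/max constants.
if __name__ == "__main__":
    min_v, max_v = math.log(1E-5), 4.0
    feats = torch.linspace(min_v, max_v, steps=8)
    zero_one = (feats - min_v) / (max_v - min_v)  # scale_features -> [0, 1]
    scaled = zero_one * 2.0 - 1.0  # -> [-1, 1]
    restored = ((scaled + 1.0) / 2.0) * (max_v - min_v) + min_v  # inverse map
    torch.testing.assert_close(restored, feats)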
| 360 |
"""simple docstring"""
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        """simple docstring"""
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        """simple docstring"""
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]
        if len(sources) == 0 or len(sinks) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        """simple docstring"""
        if self.maximum_flow_algorithm is None:
            raise Exception("""You need to set maximum flow algorithm before.""")
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        """simple docstring"""
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        """simple docstring"""
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        """simple docstring"""
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm(self):
        """simple docstring"""
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        """simple docstring"""
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        """simple docstring"""
        if not self.executed:
            raise Exception("""You should execute algorithm before using its result!""")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        """simple docstring"""
        super().__init__(flow_network)
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        """simple docstring"""
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        """simple docstring"""
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)
            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        """simple docstring"""
        preflow_delta = min(
            self.excesses[from_index], self.graph[from_index][to_index] - self.preflow[from_index][to_index], )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        """simple docstring"""
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
__lowerCAmelCase : int =[0]
__lowerCAmelCase : List[Any] =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCAmelCase : Optional[int] =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowerCAmelCase : Tuple =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCAmelCase : Optional[int] =flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 32 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
lowerCAmelCase = 5_0_0_0_0_0
lowerCAmelCase ,lowerCAmelCase = os.path.split(__file__)
lowerCAmelCase = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def _lowerCamelCase( lowercase__ , **lowercase__ ) -> List[Any]:
'''simple docstring'''
__lowercase= dataset.map(**lowercase__ )
@get_duration
def _lowerCamelCase( lowercase__ , **lowercase__ ) -> int:
'''simple docstring'''
__lowercase= dataset.filter(**lowercase__ )
def _lowerCamelCase( ) -> int:
'''simple docstring'''
__lowercase= {'num examples': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase= datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} )
__lowercase= generate_example_dataset(
os.path.join(lowercase__ , 'dataset.arrow' ) , lowercase__ , num_examples=lowercase__ )
__lowercase= transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=lowercase__ )
def tokenize(lowercase__ ):
return tokenizer(examples['text'] )
__lowercase= map(lowercase__ )
__lowercase= map(lowercase__ , batched=lowercase__ )
__lowercase= map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ )
with dataset.formatted_as(type='numpy' ):
__lowercase= map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ )
with dataset.formatted_as(type='pandas' ):
__lowercase= map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ )
with dataset.formatted_as(type='torch' , columns='numbers' ):
__lowercase= map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ )
with dataset.formatted_as(type='tensorflow' , columns='numbers' ):
__lowercase= map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ )
__lowercase= map(lowercase__ , function=lowercase__ , batched=lowercase__ )
__lowercase= filter(lowercase__ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowercase__ , 'wb' ) as f:
f.write(json.dumps(lowercase__ ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 295 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
lowerCAmelCase = None
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCAmelCase = {
'''t5-small''': 5_1_2,
'''t5-base''': 5_1_2,
'''t5-large''': 5_1_2,
'''t5-3b''': 5_1_2,
'''t5-11b''': 5_1_2,
}
class A ( A_ ):
UpperCamelCase_ : Dict =VOCAB_FILES_NAMES
UpperCamelCase_ : Dict =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : str =['''input_ids''', '''attention_mask''']
UpperCamelCase_ : List[str] =TaTokenizer
UpperCamelCase_ : List[int] =[]
def __init__(self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase=1_0_0 , lowerCAmelCase=None , **lowerCAmelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__lowercase= [f'<extra_id_{i}>' for i in range(lowerCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
__lowercase= len(set(filter(lambda lowerCAmelCase : bool('extra_id_' in str(lowerCAmelCase ) ) , lowerCAmelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , extra_ids=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , **lowerCAmelCase , )
__lowercase= vocab_file
__lowercase= False if not self.vocab_file else True
__lowercase= extra_ids
@staticmethod
def _A (lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
__lowercase= TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
f' {pretrained_model_name_or_path} automatically truncating your input to'
f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , lowerCAmelCase , )
return max_model_length
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__lowercase= os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ):
copyfile(self.vocab_file , lowerCAmelCase )
logger.info(f'Copy vocab file to {out_vocab_file}' )
return (out_vocab_file,)
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
__lowercase= token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
__lowercase= token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
__lowercase= [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _A (self ):
return list(
set(filter(lambda lowerCAmelCase : bool(re.search(r'<extra_id_\d+>' , lowerCAmelCase ) ) is not None , self.additional_special_tokens ) ) )
def _A (self ):
return [self.convert_tokens_to_ids(lowerCAmelCase ) for token in self.get_sentinel_tokens()]
| 295 | 1 |
"""simple docstring"""
def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : int ):
"""simple docstring"""
_a = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
_a = n - k
# Calculate C(n,k)
for i in range(_lowerCAmelCase ):
result *= n - i
result //= i + 1
return result
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
return binomial_coefficient(2 * node_count, _lowerCAmelCase ) // (node_count + 1)
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if n < 0:
raise ValueError('''factorial() not defined for negative values''' )
_a = 1
for i in range(1, n + 1 ):
result *= i
return result
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
return catalan_number(_lowerCAmelCase ) * factorial(_lowerCAmelCase )
if __name__ == "__main__":
__snake_case = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
f'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
f'binary trees and {catalan_number(node_count)} binary search trees.'
) | 153 |
"""simple docstring"""
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
_a = name
_a = val
def __str__( self ) -> List[Any]:
return F'{self.__class__.__name__}({self.name}, {self.val})'
def __lt__( self , __UpperCAmelCase ) -> Optional[Any]:
return self.val < other.val
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase ) -> Tuple:
_a = {}
_a = {}
_a = self.build_heap(__UpperCAmelCase )
def __getitem__( self , __UpperCAmelCase ) -> Optional[Any]:
return self.get_value(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return (idx - 1) // 2
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return idx * 2 + 1
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
return idx * 2 + 2
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple:
return self.heap_dict[key]
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
_a = len(__UpperCAmelCase ) - 1
_a = self.get_parent_idx(__UpperCAmelCase )
for idx, i in enumerate(__UpperCAmelCase ):
_a = idx
_a = i.val
for i in range(__UpperCAmelCase , -1 , -1 ):
self.sift_down(__UpperCAmelCase , __UpperCAmelCase )
return array
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
while True:
_a = self.get_left_child_idx(__UpperCAmelCase ) # noqa: E741
_a = self.get_right_child_idx(__UpperCAmelCase )
_a = idx
if l < len(__UpperCAmelCase ) and array[l] < array[idx]:
_a = l
if r < len(__UpperCAmelCase ) and array[r] < array[smallest]:
_a = r
if smallest != idx:
_a , _a = array[smallest], array[idx]
(
(
_a
) , (
_a
) ,
) = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
_a = smallest
else:
break
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
_a = self.get_parent_idx(__UpperCAmelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
_a , _a = self.heap[idx], self.heap[p]
_a , _a = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
_a = p
_a = self.get_parent_idx(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> int:
return self.heap[0]
def _UpperCAmelCase ( self ) -> Any:
_a , _a = self.heap[-1], self.heap[0]
_a , _a = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
_a = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict:
self.heap.append(__UpperCAmelCase )
_a = len(self.heap ) - 1
_a = node.val
self.sift_up(len(self.heap ) - 1 )
def _UpperCAmelCase ( self ) -> str:
return len(self.heap ) == 0
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
_a = new_value
_a = new_value
self.sift_up(self.idx_of_element[node] )
__snake_case = Node('''R''', -1)
__snake_case = Node('''B''', 6)
__snake_case = Node('''A''', 3)
__snake_case = Node('''X''', 1)
__snake_case = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__snake_case = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod() | 153 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A : List[str] = logging.get_logger(__name__)
_A : List[Any] = {
'google/vit-base-patch16-224': 'https://huggingface.co/vit-base-patch16-224/resolve/main/config.json',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase_ ):
_UpperCAmelCase : int = "vit"
def __init__( self : int , A : Tuple=7_6_8 , A : Dict=1_2 , A : Union[str, Any]=1_2 , A : Union[str, Any]=3_0_7_2 , A : List[Any]="gelu" , A : Any=0.0 , A : str=0.0 , A : Union[str, Any]=0.02 , A : Dict=1e-12 , A : List[str]=2_2_4 , A : Union[str, Any]=1_6 , A : Union[str, Any]=3 , A : str=True , A : int=1_6 , **A : int , ) ->Union[str, Any]:
super().__init__(**UpperCamelCase__ )
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : Union[str, Any] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Tuple = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : Any = hidden_dropout_prob
lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Union[str, Any] = layer_norm_eps
lowerCamelCase__ : int = image_size
lowerCamelCase__ : List[str] = patch_size
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : Union[str, Any] = qkv_bias
lowerCamelCase__ : List[str] = encoder_stride
class __SCREAMING_SNAKE_CASE ( UpperCamelCase_ ):
_UpperCAmelCase : List[str] = version.parse("1.11" )
@property
def __lowerCamelCase ( self : Union[str, Any] ) ->Optional[Any]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCamelCase ( self : List[str] ) ->Optional[Any]:
return 1e-4
| 142 | import math
def A ( _lowercase ):
return math.sqrt(_lowercase ) * math.sqrt(_lowercase ) == num
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Tuple = n
while left <= right:
SCREAMING_SNAKE_CASE : Union[str, Any] = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
SCREAMING_SNAKE_CASE : Optional[Any] = mid - 1
else:
SCREAMING_SNAKE_CASE : Optional[int] = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 182 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
_lowerCAmelCase : Any = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
_lowerCAmelCase : int = TaTokenizerFast
_lowerCAmelCase : List[Any] = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[Any] = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
_lowerCAmelCase : List[str] = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
) | 340 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCAmelCase : str = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
for attribute in key.split("." ):
_lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
_lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
_lowerCamelCase : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
_lowerCamelCase : Tuple = value
elif weight_type == "weight_g":
_lowerCamelCase : List[str] = value
elif weight_type == "weight_v":
_lowerCamelCase : List[Any] = value
elif weight_type == "bias":
_lowerCamelCase : str = value
elif weight_type == "running_mean":
_lowerCamelCase : Optional[int] = value
elif weight_type == "running_var":
_lowerCamelCase : Optional[Any] = value
elif weight_type == "num_batches_tracked":
_lowerCamelCase : int = value
elif weight_type == "inv_freq":
_lowerCamelCase : List[str] = value
else:
_lowerCamelCase : Optional[Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : Dict = []
_lowerCamelCase : Optional[Any] = fairseq_model.state_dict()
_lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase : Dict = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
_lowerCamelCase : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_lowerCamelCase : int = True
if "*" in mapped_key:
_lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2]
_lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase )
if "pos_bias_u" in name:
_lowerCamelCase : int = None
elif "pos_bias_v" in name:
_lowerCamelCase : Any = None
elif "weight_g" in name:
_lowerCamelCase : Any = "weight_g"
elif "weight_v" in name:
_lowerCamelCase : Any = "weight_v"
elif "bias" in name:
_lowerCamelCase : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase : Dict = "weight"
elif "running_mean" in name:
_lowerCamelCase : str = "running_mean"
elif "inv_freq" in name:
_lowerCamelCase : List[Any] = "inv_freq"
elif "running_var" in name:
_lowerCamelCase : Tuple = "running_var"
elif "num_batches_tracked" in name:
_lowerCamelCase : str = "num_batches_tracked"
else:
_lowerCamelCase : Dict = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : int = full_name.split("conv_layers." )[-1]
_lowerCamelCase : List[Any] = name.split("." )
_lowerCamelCase : Union[str, Any] = int(items[0] )
_lowerCamelCase : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
_lowerCamelCase : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
_lowerCamelCase : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
_lowerCamelCase : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
_lowerCamelCase : Optional[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Dict:
'''simple docstring'''
if config_path is not None:
_lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" )
else:
_lowerCamelCase : Dict = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
_lowerCamelCase : List[Any] = "rotary"
if is_finetuned:
if dict_path:
_lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase : Optional[int] = target_dict.pad_index
_lowerCamelCase : Dict = target_dict.bos_index
_lowerCamelCase : Optional[Any] = target_dict.eos_index
_lowerCamelCase : str = len(target_dict.symbols )
_lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
_lowerCamelCase : Tuple = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase : List[str] = 0
_lowerCamelCase : List[Any] = 1
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , )
_lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False
_lowerCamelCase : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
_lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
_lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase )
else:
_lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase )
if is_finetuned:
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" )
_lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase )
_lowerCamelCase : Dict = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_lowerCAmelCase : str = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 340 | 1 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Optional[Any] = logging.getLogger(__name__)
def UpperCamelCase (lowercase_: Tuple=2 , lowercase_: Any=3 , lowercase_: List[str]=16 , lowercase_: int = 10 , lowercase_: int = 2 ) -> Optional[int]:
def get_dataset(lowercase_: int ):
A__ : List[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(lowercase_ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
A__ : str = get_dataset(lowercase_ )
A__ : Union[str, Any] = get_dataset(lowercase_ )
A__ : Any = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
A__ : Optional[int] = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: List[str] , lowercase_: str , lowercase_: Dict , lowercase_: int , lowercase_: Union[str, Any]=None ) -> Optional[int]:
A__ : int = []
for epoch in range(lowercase_ ):
# Train quickly
model.train()
for batch in dataloader:
A__ , A__ : Optional[int] = batch
A__ : List[str] = model(lowercase_ )
A__ : List[str] = torch.nn.functional.mse_loss(lowercase_ , lowercase_ )
accelerator.backward(lowercase_ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class _a (nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
A__ : int = nn.Parameter(torch.randn(1 ) )
A__ : Tuple = nn.Parameter(torch.randn(1 ) )
def __A ( self , A__ ):
return x * self.a + self.b
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Dict = DummyModel()
A__ : str = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : str = dummy_dataloaders()
A__ : List[str] = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ )
# Train baseline
A__ : List[Any] = Accelerator(project_config=A__ )
A__ , A__ , A__ , A__ : List[str] = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Any = DummyModel()
A__ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Any = dummy_dataloaders()
# Train baseline
A__ : str = Accelerator()
A__ , A__ , A__ , A__ : Dict = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
A__ : Union[str, Any] = os.path.join(A__ , """initial""" )
accelerator.save_state(A__ )
((A__) , (A__)) : Dict = model.a.item(), model.b.item()
A__ : Tuple = optimizer.state_dict()
A__ : List[Any] = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Dict = model.a.item(), model.b.item()
A__ : str = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Tuple = DummyModel()
A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Union[str, Any] = dummy_dataloaders()
A__ : Tuple = Accelerator()
A__ , A__ , A__ , A__ : List[str] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(A__ )
((A__) , (A__)) : int = model.a.item(), model.b.item()
A__ : str = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : List[Any] = train(2 , A__ , A__ , A__ , A__ )
# Save everything
A__ : int = os.path.join(A__ , """checkpoint""" )
accelerator.save_state(A__ )
# Load everything back in and make sure all states work
accelerator.load_state(A__ )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Union[str, Any] = model.a.item(), model.b.item()
A__ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Union[str, Any] = DummyModel()
A__ : str = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : str = dummy_dataloaders()
A__ : Any = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : List[Any] = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : Optional[int] = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : Union[str, Any] = optimizer.state_dict()
A__ : str = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Union[str, Any] = DummyModel()
A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Optional[Any] = dummy_dataloaders()
A__ : Union[str, Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ )
A__ : int = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : int = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
((A__) , (A__)) : Dict = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : Tuple = train(2 , A__ , A__ , A__ , A__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : List[str] = model.a.item(), model.b.item()
A__ : Optional[int] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
A__ : Optional[int] = torch.tensor([1, 2, 3] )
A__ : Optional[int] = torch.tensor([2, 3, 4] )
A__ : str = DummyModel()
A__ : List[str] = torch.optim.Adam(net.parameters() )
A__ : Optional[Any] = Accelerator()
with self.assertRaises(A__ ) as ve:
accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ )
A__ : str = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : List[str] = DummyModel()
A__ : int = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ : List[str] = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 )
A__ , A__ : List[str] = dummy_dataloaders()
A__ : Any = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Tuple = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ , A__ : Dict = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
A__ : Optional[Any] = scheduler.state_dict()
train(3 , A__ , A__ , A__ , A__ , A__ )
self.assertNotEqual(A__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(A__ , scheduler.state_dict() )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[int] = DummyModel()
A__ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 )
# Train baseline
A__ : List[Any] = Accelerator(project_dir=A__ , project_config=A__ )
A__ : Any = accelerator.prepare(A__ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def __A ( self ):
A__ : Optional[Any] = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(A__ , env=os.environ.copy() )
if __name__ == "__main__":
A_ : Any = '/tmp/accelerate/state_checkpointing'
A_ : Dict = DummyModel()
A_ : Any = torch.optim.Adam(params=model.parameters(), lr=1E-3)
A_ : List[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
A_ , A_ : List[str] = dummy_dataloaders()
A_ : List[str] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
A_ : Dict = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
A_ , A_ , A_ , A_ , A_ : Optional[int] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
A_ , A_ : Optional[Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert param_device.type == accelerator.device.type
A_ : Any = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
A_ : Optional[Any] = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 192 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
A_ : Any = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def UpperCamelCase (lowercase_: Any , lowercase_: List[str] ) -> Optional[int]:
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def UpperCamelCase (lowercase_: str ) -> str:
A__ : List[str] = _TestCommandArgs(dataset=lowercase_ , all_configs=lowercase_ , save_infos=lowercase_ )
A__ : int = TestCommand(*lowercase_ )
test_command.run()
A__ : Optional[Any] = os.path.join(lowercase_ , """README.md""" )
assert os.path.exists(lowercase_ )
A__ : Dict = DatasetInfosDict.from_directory(lowercase_ )
A__ : str = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2351563,
"""num_examples""": 10000,
},
{
"""name""": """validation""",
"""num_bytes""": 238418,
"""num_examples""": 1000,
},
] , download_size=3940680 , dataset_size=2589981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
A__ , A__ : Optional[Any] = getattr(dataset_infos["""default"""] , lowercase_ ), getattr(expected_dataset_infos["""default"""] , lowercase_ )
if key == "num_bytes":
assert is_apercent_close(lowercase_ , lowercase_ )
elif key == "splits":
assert list(lowercase_ ) == list(lowercase_ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
result == expected
| 192 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''instructblip_vision_model'''
def __init__( self : Optional[int] , UpperCAmelCase__ : Dict=1_408 , UpperCAmelCase__ : List[Any]=6_144 , UpperCAmelCase__ : Tuple=39 , UpperCAmelCase__ : Dict=16 , UpperCAmelCase__ : List[Any]=224 , UpperCAmelCase__ : Tuple=14 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : int=1e-6 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : Optional[int]=1e-10 , UpperCAmelCase__ : int=True , **UpperCAmelCase__ : Union[str, Any] , ) ->Tuple:
'''simple docstring'''
super().__init__(**UpperCAmelCase__)
A__ = hidden_size
A__ = intermediate_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = patch_size
A__ = image_size
A__ = initializer_range
A__ = attention_dropout
A__ = layer_norm_eps
A__ = hidden_act
A__ = qkv_bias
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : Any) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase__)
A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__)
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''') == "instructblip":
A__ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__)
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''instructblip_qformer'''
def __init__( self : Tuple , UpperCAmelCase__ : Optional[Any]=30_522 , UpperCAmelCase__ : int=768 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : Optional[int]=12 , UpperCAmelCase__ : Any=3_072 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[Any]=512 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Any=1e-12 , UpperCAmelCase__ : List[Any]=0 , UpperCAmelCase__ : Tuple="absolute" , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : Tuple=1_408 , **UpperCAmelCase__ : Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__)
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = cross_attention_frequency
A__ = encoder_hidden_size
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[int] , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : List[Any]) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase__)
A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__)
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''') == "instructblip":
A__ = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__)
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''instructblip'''
UpperCAmelCase__ = True
def __init__( self : Any , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[str]=32 , **UpperCAmelCase__ : int) ->Any:
'''simple docstring'''
super().__init__(**UpperCAmelCase__)
if vision_config is None:
A__ = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''')
if qformer_config is None:
A__ = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''')
if text_config is None:
A__ = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''')
A__ = InstructBlipVisionConfig(**UpperCAmelCase__)
A__ = InstructBlipQFormerConfig(**UpperCAmelCase__)
A__ = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
A__ = CONFIG_MAPPING[text_model_type](**UpperCAmelCase__)
A__ = self.text_config.tie_word_embeddings
A__ = self.text_config.is_encoder_decoder
A__ = num_query_tokens
A__ = self.vision_config.hidden_size
A__ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
A__ = 1.0
A__ = 0.02
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] , UpperCAmelCase__ : InstructBlipVisionConfig , UpperCAmelCase__ : InstructBlipQFormerConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Any , ) ->int:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCAmelCase__ , )
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict:
'''simple docstring'''
A__ = copy.deepcopy(self.__dict__)
A__ = self.vision_config.to_dict()
A__ = self.qformer_config.to_dict()
A__ = self.text_config.to_dict()
A__ = self.__class__.model_type
return output
| 231 |
def SCREAMING_SNAKE_CASE ( lowercase_ = 50 ) -> int:
"""simple docstring"""
A__ = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 231 | 1 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
A : Optional[int] = logging.get_logger('transformers.models.encodec')
A : Any = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
A : str = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
A : Any = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
A : Optional[int] = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
A : List[str] = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
A : List[str] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
A : Optional[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
A : Tuple = []
A : List[Any] = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
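# Example invocation (paths are placeholders, not from the original script):
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz.th \
#       --pytorch_dump_folder_path ./encodec_24khz_hf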
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
) | 6 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image Classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING

FlaxAutoModel = auto_class_update(FlaxAutoModel)

class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING

FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")

class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING

FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")

class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING

FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")

class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)

class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)

class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING

FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")

class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)

class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING

FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")

class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING

FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)

class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)

class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING

FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")

class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING

FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 32 | 0 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a row_size x row_size matrix filled with 1..row_size**2."""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]

def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))

def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))

def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 270 degrees counterclockwise."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))

def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix

def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix

def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix

def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)
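# Worked example (illustrative): for [[1, 2], [3, 4]],
#   transpose  -> [[1, 3], [2, 4]]
#   rotate_90  -> reverse_row(transpose(m)) = [[2, 4], [1, 3]]
#   rotate_180 -> [[4, 3], [2, 1]]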
if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 204 | from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """O(n^2) brute force: for each element scan rightwards for the first larger one."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same brute force as above, written with enumerate and slicing."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) monotonic-stack solution scanning from the right."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
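# Worked example (illustrative): for [2, 7, 3, 5, 4, 6, 8] all three functions
# return [7, 8, 5, 6, 6, 8, -1]; e.g. the first greater element right of 3 is 5.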
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
setup = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 204 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 153 |
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    """Return True if `phone` looks like a valid Indian mobile number."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
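# Illustrative behaviour of the pattern (not exhaustive):
#   indian_phone_validator("+918827897895") -> True   (optional +91 prefix, number starts with 8)
#   indian_phone_validator("112233445566")  -> False  (mobile numbers must start with 7, 8 or 9)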
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895'''))
| 153 | 1 |
"""simple docstring"""
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) ~= h/2 * (f(a) + 2*f(x_1) + ... + 2*f(x_n-1) + f(b))
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
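# Sanity check (illustrative): with f(x) = x**2 on [0, 1] and 10 steps the rule
# returns roughly 0.335, close to the exact integral 1/3 (trapezoids slightly
# overshoot a convex integrand).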
def make_points(a, b, h):
    """Yield the interior sample points a+h, a+2h, ... (exclusive of b)."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
| 361 | """simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem by locking this file"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
__SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
__SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank)
__SCREAMING_SNAKE_CASE =socket.gethostname()
__SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(F"{gpu} is broken")
raise
| 321 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
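# Note (illustrative): with the lazy module installed above, importing a heavy
# symbol such as MT5Model from this package only triggers the torch-dependent
# import on first attribute access, keeping `import transformers` cheap.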
| 340 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build and simulate an n-qubit quantum Fourier transform circuit."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)
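# Illustrative expectation: starting from |0...0>, the QFT produces a uniform
# superposition, so the 10_000 shots should split roughly evenly across all
# 2**number_of_qubits basis states.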
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
| 340 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
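# Usage sketch (illustrative, not part of the original file): quantization-aware
# mode is a single flag on the config.
#   config = IBertConfig(quant_mode=True)
#   model = IBertForMaskedLM(config)  # hypothetical downstream use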
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
]) | 366 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
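    # Note on the bbox fixup above (illustrative): boxes follow the
    # [x0, y0, x1, y1] convention with x0 <= x1 and y0 <= y1, so an illegal
    # sample such as [5, 9, 2, 4] is swapped pair-wise into [2, 4, 5, 9].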
    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3)) | 303 | 0 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    """Map a key from the original YOSO checkpoint to the HF naming scheme."""
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key
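# Illustrative renaming (assuming a key of this shape exists in the checkpoint):
#   "model.transformer_0.mha.W_q.weight"
#     -> "yoso.encoder.layer.0.attention.self.query.weight"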
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename all keys and add the buffers the HF implementation expects."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_A = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 231 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
_A = logging.get_logger(__name__)
_A = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(datasets: List[DatasetType], probabilities: Optional[List[float]] = None, seed: Optional[int] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted") -> DatasetType:
    """Interleave several datasets into one, alternating examples or sampling them with the given probabilities."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
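# Illustrative usage: mix two datasets, drawing roughly 70%/30% of the examples.
#   mixed = interleave_datasets([ds_a, ds_b], probabilities=[0.7, 0.3], seed=42)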
def concatenate_datasets(dsets: List[DatasetType], info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, axis: int = 0) -> DatasetType:
    """Concatenate several datasets of the same type along rows (axis=0) or columns (axis=1)."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 231 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
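    # Size check worked out for the defaults (illustrative): stage 0 has
    # patch_size=7, stride=4, padding=2, so a 64x64 input maps to
    # floor((64 + 2*2 - 7) / 4 + 1) = 16 along each spatial dimension.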
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFCvtModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def _lowerCAmelCase ( ):
__lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCamelCase ,return_tensors='''tf''' )
# forward pass
__lowercase = model(**_lowerCamelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_lowerCamelCase )
__lowercase = tf.constant([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_lowerCamelCase ,atol=1E-4 ) )
| 217 |
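The model-shape check above (create_and_check_model in the unobfuscated source) applies the standard convolution output-size formula once per stage. A minimal standalone sketch of that arithmetic, using the tester's default kernel/stride/padding triples (no TF required):

from math import floor

def conv_output_size(size, kernel, stride, padding):
    # floor((size + 2 * padding - kernel) / stride) + 1, as computed in the test above
    return floor((size + 2 * padding - kernel) / stride) + 1

size = 64  # tester default image_size
for kernel, stride, padding in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
    size = conv_output_size(size, kernel, stride, padding)
    print(size)  # 16, then 8, then 4: the spatial size of each stage's feature map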
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def _lowerCAmelCase ( ):
print('''Making key files...''' )
make_key_files('''rsa''' , 1_0_2_4 )
print('''Key files generation successful.''' )
def _lowerCAmelCase ( lowerCamelCase_ : int ):
print('''Generating prime p...''' )
__lowercase = rabinMiller.generate_large_prime(lowerCamelCase_ )
print('''Generating prime q...''' )
__lowercase = rabinMiller.generate_large_prime(lowerCamelCase_ )
__lowercase = p * q
print('''Generating e that is relatively prime to (p - 1) * (q - 1)...''' )
while True:
__lowercase = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(lowerCamelCase_ , (p - 1) * (q - 1) ) == 1:
break
print('''Calculating d that is mod inverse of e...''' )
__lowercase = cryptoMath.find_mod_inverse(lowerCamelCase_ , (p - 1) * (q - 1) )
__lowercase = (n, e)
__lowercase = (n, d)
return (public_key, private_key)
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : int ):
if os.path.exists(f"{name}_pubkey.txt" ) or os.path.exists(f"{name}_privkey.txt" ):
print('''\nWARNING:''' )
print(
f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
__lowercase , __lowercase = generate_key(lowerCamelCase_ )
print(f"\nWriting public key to file {name}_pubkey.txt..." )
with open(f"{name}_pubkey.txt" , '''w''' ) as out_file:
out_file.write(f"{key_size},{public_key[0]},{public_key[1]}" )
print(f"Writing private key to file {name}_privkey.txt..." )
with open(f"{name}_privkey.txt" , '''w''' ) as out_file:
out_file.write(f"{key_size},{private_key[0]},{private_key[1]}" )
if __name__ == "__main__":
main()
| 217 | 1 |
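The key generator above returns standard RSA pairs (n, e) and (n, d), so textbook modular exponentiation round-trips a message. A minimal sketch, assuming the generator is callable as generate_key (the name in the unobfuscated source) and that the integer message is smaller than n:

# Hypothetical usage sketch; `generate_key` stands in for the key-pair function above.
public_key, private_key = generate_key(1024)
n, e = public_key
_, d = private_key

message = 42  # illustrative plaintext; must satisfy message < n
ciphertext = pow(message, e, n)          # encrypt: c = m^e mod n
assert pow(ciphertext, d, n) == message  # decrypt: m = c^d mod n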
import numpy
class A:
'''simple docstring'''
def __init__( self : Optional[int] , A_ : numpy.ndarray , A_ : numpy.ndarray ) -> None:
"""simple docstring"""
lowerCamelCase_ = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase_ = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase_ = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase_ = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase_ = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase_ = numpy.zeros(output_array.shape )
def a__ ( self : str ) -> numpy.ndarray:
"""simple docstring"""
lowerCamelCase_ = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase_ = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase_ = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def a__ ( self : Optional[Any] ) -> None:
"""simple docstring"""
lowerCamelCase_ = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase_ = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase_ = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def a__ ( self : Tuple , A_ : numpy.ndarray , A_ : int , A_ : bool ) -> None:
"""simple docstring"""
for iteration in range(1 , iterations + 1 ):
lowerCamelCase_ = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase_ = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def a__ ( self : Optional[Any] , A_ : numpy.ndarray ) -> int:
"""simple docstring"""
lowerCamelCase_ = input_arr
lowerCamelCase_ = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase_ = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase_ = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _SCREAMING_SNAKE_CASE ( lowercase : numpy.ndarray ):
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def _SCREAMING_SNAKE_CASE ( lowercase : numpy.ndarray ):
'''simple docstring'''
return (value) * (1 - (value))
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase_ = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCamelCase_ = TwoHiddenLayerNeuralNetwork(
input_array=lowercase , output_array=lowercase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=lowercase , iterations=10 , give_loss=lowercase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 204 |
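Note that sigmoid_derivative above expects an already-activated value: backpropagation uses the identity sigma'(x) = sigma(x) * (1 - sigma(x)), and the layers stored by feedforward hold sigma(x), not x. A quick finite-difference check of that identity (standalone sketch):

import numpy

def sigmoid(x):
    return 1 / (1 + numpy.exp(-x))

x = 0.3
eps = 1e-6
numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)  # central difference
analytic = sigmoid(x) * (1 - sigmoid(x))  # sigmoid_derivative applied to sigmoid(x)
assert abs(numeric - analytic) < 1e-8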
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class A:
'''simple docstring'''
UpperCamelCase = BlenderbotConfig
UpperCamelCase = {}
UpperCamelCase = '''gelu'''
def __init__( self : int , A_ : Optional[int] , A_ : List[str]=13 , A_ : str=7 , A_ : Any=True , A_ : Any=False , A_ : Optional[Any]=99 , A_ : List[str]=32 , A_ : List[str]=2 , A_ : Dict=4 , A_ : List[str]=37 , A_ : List[str]=0.1 , A_ : Optional[int]=0.1 , A_ : str=20 , A_ : str=2 , A_ : Optional[Any]=1 , A_ : int=0 , ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = eos_token_id
lowerCamelCase_ = pad_token_id
lowerCamelCase_ = bos_token_id
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCamelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCamelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCamelCase_ = prepare_blenderbot_inputs_dict(A_ , A_ , A_ )
return config, inputs_dict
def a__ ( self : Tuple , A_ : Union[str, Any] , A_ : List[str] ) -> int:
"""simple docstring"""
lowerCamelCase_ = TFBlenderbotModel(config=A_ ).get_decoder()
lowerCamelCase_ = inputs_dict['input_ids']
lowerCamelCase_ = input_ids[:1, :]
lowerCamelCase_ = inputs_dict['attention_mask'][:1, :]
lowerCamelCase_ = inputs_dict['head_mask']
lowerCamelCase_ = 1
# first forward pass
lowerCamelCase_ = model(A_ , attention_mask=A_ , head_mask=A_ , use_cache=A_ )
lowerCamelCase_ , lowerCamelCase_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCamelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCamelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCamelCase_ = model(A_ , attention_mask=A_ )[0]
lowerCamelCase_ = model(A_ , attention_mask=A_ , past_key_values=A_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCamelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCamelCase_ = output_from_no_past[:, -3:, random_slice_idx]
lowerCamelCase_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A_ , A_ , rtol=1E-3 )
def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] , lowercase : Any , lowercase : Tuple , lowercase : List[Any]=None , lowercase : List[str]=None , lowercase : List[Any]=None , lowercase : Tuple=None , lowercase : Union[str, Any]=None , ):
'''simple docstring'''
if attention_mask is None:
lowerCamelCase_ = tf.cast(tf.math.not_equal(lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowerCamelCase_ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowerCamelCase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
UpperCamelCase = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase = (
{
'''conversational''': TFBlenderbotForConditionalGeneration,
'''feature-extraction''': TFBlenderbotModel,
'''summarization''': TFBlenderbotForConditionalGeneration,
'''text2text-generation''': TFBlenderbotForConditionalGeneration,
'''translation''': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = TFBlenderbotModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ )
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A_ )
@require_tokenizers
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = ['''My friends are cool but they eat too many carbs.''']
UpperCamelCase = '''facebook/blenderbot-400M-distill'''
@cached_property
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def a__ ( self : List[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def a__ ( self : str ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer(self.src_text , return_tensors='tf' )
lowerCamelCase_ = self.model.generate(
model_inputs.input_ids , )
lowerCamelCase_ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A_ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 204 | 1 |
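prepare_blenderbot_inputs_dict above derives the attention mask purely from the pad token; the same one-liner in isolation (a sketch with an arbitrary pad id and dtype):

import tensorflow as tf

pad_token_id = 0  # illustrative; the helper above uses config.pad_token_id
input_ids = tf.constant([[5, 7, 2, 0, 0]])
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int32)
print(attention_mask.numpy())  # [[1 1 1 0 0]] -- padded positions are masked out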
"""simple docstring"""
import os
from collections.abc import Iterator
def lowercase__ ( _UpperCAmelCase = "." ) -> Iterator[str]:
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(_UpperCAmelCase ):
lowercase : Any = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_UpperCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase ).lstrip('./' )
def lowercase__ ( _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return f'''{i * ' '}*''' if i else "\n##"
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase : Tuple = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_UpperCAmelCase ) or old_parts[i] != new_part) and new_part:
print(f'''{md_prefix(_UpperCAmelCase )} {new_part.replace('_' , ' ' ).title()}''' )
return new_path
def lowercase__ ( _UpperCAmelCase = "." ) -> None:
'''simple docstring'''
lowercase : Dict = ''
for filepath in sorted(good_file_paths(_UpperCAmelCase ) ):
lowercase , lowercase : Dict = os.path.split(_UpperCAmelCase )
if filepath != old_path:
lowercase : str = print_path(_UpperCAmelCase , _UpperCAmelCase )
lowercase : Dict = (filepath.count(os.sep ) + 1) if filepath else 0
lowercase : Optional[Any] = f'''{filepath}/{filename}'''.replace(' ' , '%20' )
lowercase : Optional[Any] = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(f'''{md_prefix(_UpperCAmelCase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md('.')
| 53 |
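md_prefix above maps a nesting depth to either a fresh section heading or an indented bullet. Its behavior in isolation, assuming a two-space indent per level (the rendering above may have collapsed the whitespace):

def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"

print(repr(md_prefix(0)))  # '\n##'  -> starts a new '##' section
print(repr(md_prefix(1)))  # '  *'   -> bullet indented one level
print(repr(md_prefix(2)))  # '    *' -> bullet indented two levels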
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def lowercase__ ( _UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] = np.inf
def set_batch_size(_UpperCAmelCase ) -> None:
nonlocal batch_size
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase : Any = min(_UpperCAmelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase : Dict = min(_UpperCAmelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and feature.dtype == "binary":
lowercase : int = min(_UpperCAmelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(_UpperCAmelCase , _UpperCAmelCase )
return None if batch_size is np.inf else batch_size
class a__ ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : Union[str, Any], lowerCAmelCase : NestedDataStructureLike[PathLike], lowerCAmelCase : Optional[NamedSplit] = None, lowerCAmelCase : Optional[Features] = None, lowerCAmelCase : str = None, lowerCAmelCase : bool = False, lowerCAmelCase : bool = False, lowerCAmelCase : Optional[int] = None, **lowerCAmelCase : int, ) -> List[Any]:
super().__init__(
lowerCAmelCase, split=lowerCAmelCase, features=lowerCAmelCase, cache_dir=lowerCAmelCase, keep_in_memory=lowerCAmelCase, streaming=lowerCAmelCase, num_proc=lowerCAmelCase, **lowerCAmelCase, )
lowercase : str = path_or_paths if isinstance(lowerCAmelCase, lowerCAmelCase ) else {self.split: path_or_paths}
lowercase : Tuple = _PACKAGED_DATASETS_MODULES['parquet'][1]
lowercase : Optional[int] = Parquet(
cache_dir=lowerCAmelCase, data_files=lowerCAmelCase, features=lowerCAmelCase, hash=lowerCAmelCase, **lowerCAmelCase, )
def lowercase ( self : Optional[int] ) -> Union[str, Any]:
# Build iterable dataset
if self.streaming:
lowercase : Union[str, Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase : Tuple = None
lowercase : Union[str, Any] = None
lowercase : List[Any] = None
lowercase : int = None
self.builder.download_and_prepare(
download_config=lowerCAmelCase, download_mode=lowerCAmelCase, verification_mode=lowerCAmelCase, base_path=lowerCAmelCase, num_proc=self.num_proc, )
lowercase : Any = self.builder.as_dataset(
split=self.split, verification_mode=lowerCAmelCase, in_memory=self.keep_in_memory )
return dataset
class a__ :
def __init__( self : Dict, lowerCAmelCase : Dataset, lowerCAmelCase : Union[PathLike, BinaryIO], lowerCAmelCase : Optional[int] = None, **lowerCAmelCase : Optional[Any], ) -> Optional[Any]:
lowercase : List[Any] = dataset
lowercase : int = path_or_buf
lowercase : Optional[Any] = batch_size or get_writer_batch_size(dataset.features )
lowercase : Optional[Any] = parquet_writer_kwargs
def lowercase ( self : Union[str, Any] ) -> int:
lowercase : Union[str, Any] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf, (str, bytes, os.PathLike) ):
with open(self.path_or_buf, 'wb+' ) as buffer:
lowercase : int = self._write(file_obj=lowerCAmelCase, batch_size=lowerCAmelCase, **self.parquet_writer_kwargs )
else:
lowercase : List[Any] = self._write(file_obj=self.path_or_buf, batch_size=lowerCAmelCase, **self.parquet_writer_kwargs )
return written
def lowercase ( self : int, lowerCAmelCase : BinaryIO, lowerCAmelCase : int, **lowerCAmelCase : Union[str, Any] ) -> int:
lowercase : Optional[Any] = 0
lowercase : int = parquet_writer_kwargs.pop('path_or_buf', lowerCAmelCase )
lowercase : List[str] = self.dataset.features.arrow_schema
lowercase : int = pq.ParquetWriter(lowerCAmelCase, schema=lowerCAmelCase, **lowerCAmelCase )
for offset in logging.tqdm(
range(0, len(self.dataset ), lowerCAmelCase ), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating parquet from Arrow format', ):
lowercase : Tuple = query_table(
table=self.dataset._data, key=slice(lowerCAmelCase, offset + batch_size ), indices=self.dataset._indices if self.dataset._indices is not None else None, )
writer.write_table(lowerCAmelCase )
written += batch.nbytes
writer.close()
return written
| 53 | 1 |
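The _write method above follows the standard batched pyarrow pattern: open a ParquetWriter with a fixed schema, stream tables into it (one row group per write), then close. The same pattern in isolation, with made-up data:

import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"id": list(range(10)), "text": [f"row {i}" for i in range(10)]})

writer = pq.ParquetWriter("example.parquet", schema=table.schema)
batch_size = 4
for offset in range(0, table.num_rows, batch_size):
    writer.write_table(table.slice(offset, batch_size))  # each slice becomes a row group
writer.close()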
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : List[Any] = logging.get_logger(__name__)
__A : List[Any] = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class _UpperCAmelCase ( _A , _A ):
SCREAMING_SNAKE_CASE_ : List[Any] = "resnet"
SCREAMING_SNAKE_CASE_ : Tuple = ["basic", "bottleneck"]
def __init__( self : Any , A : Tuple=3 , A : str=64 , A : Tuple=[2_56, 5_12, 10_24, 20_48] , A : List[Any]=[3, 4, 6, 3] , A : Union[str, Any]="bottleneck" , A : int="relu" , A : List[Any]=False , A : Tuple=None , A : int=None , **A : List[str] , ) -> List[Any]:
super().__init__(**A )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
lowercase_ : List[Any] = num_channels
lowercase_ : Tuple = embedding_size
lowercase_ : Dict = hidden_sizes
lowercase_ : Tuple = depths
lowercase_ : Optional[int] = layer_type
lowercase_ : str = hidden_act
lowercase_ : Dict = downsample_in_first_stage
lowercase_ : str = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(A ) + 1 )]
lowercase_ , lowercase_ : List[str] = get_aligned_output_features_output_indices(
out_features=A , out_indices=A , stage_names=self.stage_names )
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : str = version.parse("1.11" )
@property
def A ( self : Any ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def A ( self : Union[str, Any] ) -> float:
return 1e-3
| 33 |
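The (obfuscated) config class above is ResNet's: it validates layer_type against the allowed layer_types and derives stage_names from the number of depths. Equivalent usage with the public transformers.ResNetConfig (a sketch; output strings are what the code above would produce):

from transformers import ResNetConfig

config = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck")
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']

try:
    ResNetConfig(layer_type="invalid")
except ValueError as err:
    print(err)  # layer_type=invalid is not one of basic,bottleneck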
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( lowerCamelCase ):
lowercase = (DDPMParallelScheduler,)
def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def A__ ( self ) -> List[str]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = self.dummy_sample_deter + 0.1
UpperCamelCase = self.dummy_sample_deter - 0.1
UpperCamelCase = samplea.shape[0]
UpperCamelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCamelCase = torch.arange(_SCREAMING_SNAKE_CASE )[0:3, None].repeat(1 , _SCREAMING_SNAKE_CASE )
UpperCamelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCamelCase = scheduler.batch_step_no_noise(_SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
UpperCamelCase = scheduler.timesteps
for i, timestep in enumerate(_SCREAMING_SNAKE_CASE ):
if i == len(_SCREAMING_SNAKE_CASE ) - 1:
UpperCamelCase = -1
else:
UpperCamelCase = timesteps[i + 1]
UpperCamelCase = scheduler.previous_timestep(_SCREAMING_SNAKE_CASE )
UpperCamelCase = prev_t.item()
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = [100, 87, 50, 1, 0]
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_SCREAMING_SNAKE_CASE , timesteps=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _SCREAMING_SNAKE_CASE , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
| 321 | 0 |
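The _get_variance assertions in the test above follow from the DDPM posterior variance beta_tilde_t = beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) under the config's linear beta schedule. Recomputing the three asserted values directly (a sketch of the formula, not of the scheduler's internals):

import numpy as np

num_train_timesteps = 1000
betas = np.linspace(0.0001, 0.02, num_train_timesteps)  # "linear" schedule from the config
alphas_cumprod = np.cumprod(1.0 - betas)

def posterior_variance(t):
    alpha_prod_prev = alphas_cumprod[t - 1] if t > 0 else 1.0
    return betas[t] * (1 - alpha_prod_prev) / (1 - alphas_cumprod[t])

print(posterior_variance(0))    # 0.0
print(posterior_variance(487))  # ~0.00979
print(posterior_variance(999))  # ~0.02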
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
UpperCAmelCase__ : Union[str, Any] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase__ : Any = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
UpperCAmelCase__ : Optional[int] = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
UpperCAmelCase__ : Union[str, Any] = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
UpperCAmelCase__ : Optional[Any] = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
UpperCAmelCase__ : List[Any] = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def lowerCamelCase__ ( a ) -> List[Any]:
_A: List[str] = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , a )
return [m.group(0 ) for m in matches]
def lowerCamelCase__ ( ) -> Any:
_A: int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_A: Any = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
_A: Union[str, Any] = collections.defaultdict(a )
_A: List[str] = collections.defaultdict(a )
_A: int = collections.defaultdict(a )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(a ):
_A: List[str] = None
if _re_tf_models.match(a ) is not None:
_A: List[Any] = tf_models
_A: Union[str, Any] = _re_tf_models.match(a ).groups()[0]
elif _re_flax_models.match(a ) is not None:
_A: Optional[Any] = flax_models
_A: str = _re_flax_models.match(a ).groups()[0]
elif _re_pt_models.match(a ) is not None:
_A: Any = pt_models
_A: List[str] = _re_pt_models.match(a ).groups()[0]
if lookup_dict is not None:
while len(a ) > 0:
if attr_name in model_prefix_to_model_type:
_A: Tuple = True
break
# Try again after removing the last word in the name
_A: List[Any] = ''''''.join(camel_case_split(a )[:-1] )
_A: str = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
_A: Tuple = list(a )
all_models.sort()
_A: str = {'''model_type''': all_models}
_A: int = [pt_models[t] for t in all_models]
_A: str = [tf_models[t] for t in all_models]
_A: List[str] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to pick a processor class for each model type.
_A: Tuple = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
_A: Optional[int] = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
_A: Any = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
_A: Dict = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
_A: List[Any] = '''AutoTokenizer'''
_A: Optional[int] = [processors[t] for t in all_models]
return pd.DataFrame(a )
def lowerCamelCase__ ( a ) -> Optional[Any]:
_A: Dict = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
_A: Any = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
_A: Dict = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(a , a , a ):
# The type of pipeline may not exist in this framework
if not hasattr(a , a ):
continue
# First extract all model_names
_A: List[str] = []
for name in getattr(a , a ).values():
if isinstance(a , a ):
model_names.append(a )
else:
model_names.extend(list(a ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def lowerCamelCase__ ( a , a ) -> List[Any]:
_A: Optional[int] = get_frameworks_table()
_A: List[Any] = Dataset.from_pandas(a )
_A: List[Any] = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=a )
_A: Any = Dataset.from_json(a )
_A: Tuple = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(a ) )
}
_A: str = update_pipeline_and_auto_class_table(a )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
_A: str = sorted(table.keys() )
_A: List[str] = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
_A: Dict = Dataset.from_pandas(a )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(a , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(a , '''pipeline_tags.json''' ) )
if commit_sha is not None:
_A: Union[str, Any] = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
_A: str = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=a , repo_type='''dataset''' , token=a , commit_message=a , )
def lowerCamelCase__ ( ) -> List[Any]:
_A: Optional[int] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
_A: Union[str, Any] = transformers_module.pipelines.SUPPORTED_TASKS
_A: int = []
for key in pipeline_tasks:
if key not in in_table:
_A: Dict = pipeline_tasks[key]['''pt''']
if isinstance(a , (list, tuple) ):
_A: Optional[Any] = model[0]
_A: Optional[int] = model.__name__
if model not in in_table.values():
missing.append(a )
if len(a ) > 0:
_A: Optional[Any] = ''', '''.join(a )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
UpperCAmelCase__ : List[Any] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 301 |
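camel_case_split above relies on two lookaround boundaries: a lower-to-upper transition and the end of an acronym run. In isolation, including the retry trick used in the framework lookup (dropping the trailing word on each miss):

import re

def camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]

print(camel_case_split("TFBertModel"))           # ['TF', 'Bert', 'Model']
print(camel_case_split("AutoModelForCausalLM"))  # ['Auto', 'Model', 'For', 'Causal', 'LM']

# The lookup loop above retries after stripping the trailing word:
print("".join(camel_case_split("AutoModelForCausalLM")[:-1]))  # 'AutoModelForCausal'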
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Any = (DDPMParallelScheduler,)
def __magic_name__ ( self : Optional[int] , **lowerCAmelCase_ : Any ):
"""simple docstring"""
_A: Optional[int] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowerCAmelCase_ )
return config
def __magic_name__ ( self : int ):
"""simple docstring"""
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def __magic_name__ ( self : int ):
"""simple docstring"""
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=lowerCAmelCase_ )
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: List[Any] = self.scheduler_classes[0]
_A: Union[str, Any] = self.get_scheduler_config()
_A: Optional[Any] = scheduler_class(**lowerCAmelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Any = self.scheduler_classes[0]
_A: List[str] = self.get_scheduler_config()
_A: Union[str, Any] = scheduler_class(**lowerCAmelCase_ )
_A: List[Any] = len(lowerCAmelCase_ )
_A: Union[str, Any] = self.dummy_model()
_A: Dict = self.dummy_sample_deter
_A: Dict = self.dummy_sample_deter + 0.1
_A: str = self.dummy_sample_deter - 0.1
_A: str = samplea.shape[0]
_A: Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
_A: List[str] = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ )
_A: List[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_A: Optional[int] = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_A: Dict = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A: List[str] = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 1153.1833 ) < 1e-2
assert abs(result_mean.item() - 0.5005 ) < 1e-3
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Optional[Any] = self.scheduler_classes[0]
_A: List[Any] = self.get_scheduler_config()
_A: Any = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = len(lowerCAmelCase_ )
_A: Any = self.dummy_model()
_A: Optional[int] = self.dummy_sample_deter
_A: List[str] = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_A: Optional[int] = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_A: Optional[int] = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_A: List[Any] = pred_prev_sample
_A: Optional[int] = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A: Any = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Optional[int] = self.scheduler_classes[0]
_A: Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
_A: List[str] = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = len(lowerCAmelCase_ )
_A: Any = self.dummy_model()
_A: Any = self.dummy_sample_deter
_A: str = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_A: Any = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_A: int = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_A: Tuple = pred_prev_sample
_A: List[Any] = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A: str = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Optional[int] = self.scheduler_classes[0]
_A: Optional[Any] = self.get_scheduler_config()
_A: Dict = scheduler_class(**lowerCAmelCase_ )
_A: Any = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
_A: Tuple = scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase_ ):
if i == len(lowerCAmelCase_ ) - 1:
_A: Dict = -1
else:
_A: int = timesteps[i + 1]
_A: List[str] = scheduler.previous_timestep(lowerCAmelCase_ )
_A: str = prev_t.item()
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Tuple = self.scheduler_classes[0]
_A: int = self.get_scheduler_config()
_A: Any = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(lowerCAmelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: List[str] = self.scheduler_classes[0]
_A: Optional[Any] = self.get_scheduler_config()
_A: Union[str, Any] = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = [1_0_0, 8_7, 5_0, 1, 0]
_A: Dict = len(lowerCAmelCase_ )
with self.assertRaises(lowerCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: List[Any] = self.scheduler_classes[0]
_A: int = self.get_scheduler_config()
_A: str = scheduler_class(**lowerCAmelCase_ )
_A: Any = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowerCAmelCase_ , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
| 301 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__A : Dict = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , unittest.TestCase):
_UpperCamelCase:Any = ReformerTokenizer
_UpperCamelCase:List[str] = ReformerTokenizerFast
_UpperCamelCase:Optional[int] = True
_UpperCamelCase:Optional[int] = False
_UpperCamelCase:List[Any] = True
def _snake_case ( self )-> Dict:
super().setUp()
lowerCamelCase_ =ReformerTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self )-> Any:
lowerCamelCase_ ='''<s>'''
lowerCamelCase_ =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def _snake_case ( self )-> Optional[Any]:
lowerCamelCase_ =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(_A ) , 1000 )
def _snake_case ( self )-> str:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _snake_case ( self )-> Dict:
if not self.test_rust_tokenizer:
return
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_rust_tokenizer()
lowerCamelCase_ ='''I was born in 92000, and this is falsé.'''
lowerCamelCase_ =tokenizer.tokenize(_A )
lowerCamelCase_ =rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
lowerCamelCase_ =tokenizer.encode(_A , add_special_tokens=_A )
lowerCamelCase_ =rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
lowerCamelCase_ =self.get_rust_tokenizer()
lowerCamelCase_ =tokenizer.encode(_A )
lowerCamelCase_ =rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=15 )-> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase_ =self.rust_tokenizer_class.from_pretrained(_A , **_A )
# Simple input
lowerCamelCase_ ='''This is a simple input'''
lowerCamelCase_ =['''This is a simple input 1''', '''This is a simple input 2''']
lowerCamelCase_ =('''This is a simple input''', '''This is a pair''')
lowerCamelCase_ =[
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding="""max_length""" )
# Simple input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding="""max_length""" )
# Simple input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding="""max_length""" , )
# Pair input
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding="""max_length""" )
# Pair input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding="""max_length""" )
# Pair input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding="""max_length""" , )
def _snake_case ( self )-> Dict:
pass
def _snake_case ( self )-> Any:
lowerCamelCase_ =ReformerTokenizer(_A , keep_accents=_A )
lowerCamelCase_ =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [285, 46, 10, 170, 382] , )
lowerCamelCase_ =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCamelCase_ =tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCamelCase_ =tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def _snake_case ( self )-> int:
return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
@slow
def _snake_case ( self )-> List[str]:
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def _snake_case ( self )-> List[Any]:
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
def _snake_case ( self )-> List[str]:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self)-> Any:
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265], [84, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="google/reformer-crime-and-punishment", revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a", padding=False, sequences=sequences, )
| 154 |
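# Replaying the "Hello World!" expectation from the slow test above against the
# published checkpoint (assumes network access and the sentencepiece package):
from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
assert tok.encode("Hello World!") == [126, 32, 262, 152, 38, 72, 287]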
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Replaces curr_string with its two extensions, widening all codes at powers of two."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compresses the given bit string using the Lempel-Ziv algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepends a header encoding the original file length to the compressed bits."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given string of 0's and 1's to the file as bytes."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Reads the source file, compresses it and writes the result to the destination."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 303 | 0 |
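# Minimal in-memory check of compress_data above, with no files involved. The
# exact output depends on the growing lexicon, so this only sanity-checks that
# a non-empty bit string comes back.
compressed_bits = compress_data("0100100011")
assert compressed_bits and set(compressed_bits) <= {"0", "1"}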
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Any:
'''simple docstring'''
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 360 |
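# What shift_tokens_right amounts to, re-derived in plain numpy (illustrative
# only, not the transformers implementation): decoder inputs are the labels
# shifted one position right with the decoder start token prepended.
import numpy as np

labels = np.array([[42, 7, 1]])
decoder_start_token_id = 0
shifted = np.concatenate([np.full((labels.shape[0], 1), decoder_start_token_id), labels[:, :-1]], axis=-1)
assert shifted.tolist() == [[0, 42, 7]]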
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs) -> Any:
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs) -> List[Any]:
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs) -> Any:
        requires_backends(cls, ["torch", "torchsde"])
| 273 | 0 |
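# A minimal analogue (not the diffusers implementation) of why the dummy-object
# pattern above works: a metaclass intercepts attribute access, so importing
# the class succeeds without torch/torchsde and only *using* it raises.
class _MissingBackendMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires `torch` and `torchsde` to be installed.")


class FakeSdeScheduler(metaclass=_MissingBackendMeta):  # hypothetical class, for illustration only
    pass


try:
    FakeSdeScheduler.from_pretrained  # attribute access fails loudly
except ImportError as err:
    print(err)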
"""simple docstring"""
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Calculates the Adler-32 checksum of the given string (ASCII assumed)."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 217 |
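# Cross-check against the reference implementation in the standard library; for
# ASCII input the pure-Python adler32 above should agree with zlib's.
import zlib

sample = "Wikipedia"
assert adler32(sample) == zlib.adler32(sample.encode("ascii"))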
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self)-> None:
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
def lowercase_ ( self : Optional[int] , **UpperCamelCase__ : Union[str, Any])-> Optional[int]:
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__)
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : str)-> List[Any]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def lowercase_ ( self : Union[str, Any])-> str:
'''simple docstring'''
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def lowercase_ ( self : str)-> Dict:
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip("Skip this test while all models are still to be uploaded.")
def lowercase_ ( self : Optional[Any])-> Any:
'''simple docstring'''
pass
def lowercase_ ( self : Optional[int])-> List[Any]:
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6], )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
@slow
def lowercase_ ( self : Optional[int])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
    def setUpClass(cls)-> None:
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr")
        cls.pad_token_id = 1
        return cls
def lowercase_ ( self : int)-> Optional[int]:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("ar") , 1_2_8_0_0_6)
self.assertEqual(self.tokenizer.get_lang_id("en") , 1_2_8_0_2_2)
self.assertEqual(self.tokenizer.get_lang_id("ro") , 1_2_8_0_7_6)
self.assertEqual(self.tokenizer.get_lang_id("mr") , 1_2_8_0_6_3)
def lowercase_ ( self : int)-> int:
'''simple docstring'''
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)
def lowercase_ ( self : Optional[int])-> Dict:
'''simple docstring'''
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
def lowercase_ ( self : str)-> Tuple:
'''simple docstring'''
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
def lowercase_ ( self : Any)-> Optional[Any]:
'''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_to_id)
@require_torch
def lowercase_ ( self : List[str])-> str:
'''simple docstring'''
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id)
        for k in batch:
            batch[k] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowercase_ ( self : Dict)-> Optional[int]:
'''simple docstring'''
        self.tokenizer.src_lang = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr")])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
        self.tokenizer.src_lang = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh")])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
@require_torch
def lowercase_ ( self : Optional[int])-> Any:
'''simple docstring'''
        self.tokenizer.tgt_lang = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr")])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
        self.tokenizer.tgt_lang = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh")])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
def lowercase_ ( self : Union[str, Any])-> Optional[int]:
'''simple docstring'''
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")
        self.assertEqual(
            nested_simplify(inputs), {
# en_XX, A, test, EOS
"input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 1_2_8_0_0_6,
} , )
| 217 | 1 |
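# The convention the integration tests above exercise, spelled out (requires
# downloading the checkpoint; MaMaaaTokenizer is M2M100Tokenizer under this
# dataset's renaming): encoded source text is [src_lang_code, ...ids..., eos].
from transformers import M2M100Tokenizer

tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
ids = tok("Hello").input_ids
assert ids[0] == tok.get_lang_id("en") and ids[-1] == tok.eos_token_id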
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(patience_sort(unsorted))
| 303 |
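# Quick in-memory checks for patience_sort above; each Stack is a pile as in
# the patience card game, and heapq.merge stitches the piles back together.
assert patience_sort([1, 9, 5, 21, 17, 6]) == [1, 5, 6, 9, 17, 21]
assert patience_sort([]) == []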
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs, ) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 303 | 1 |
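# Sketch of how the strides above turn into a patch count. The formula mirrors
# the convolutional patch embedding in the AST model (an assumption about the
# modeling code, not something this config file defines).
cfg = ASTConfig()
freq_out = (cfg.num_mel_bins - cfg.patch_size) // cfg.frequency_stride + 1  # 12
time_out = (cfg.max_length - cfg.patch_size) // cfg.time_stride + 1  # 101
print(freq_out * time_out)  # 1212 patches per spectrogram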
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 53 |
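# Hedged usage sketch (needs a real interactive terminal): poll one key and
# branch on the arrow codes. get_character() returns KEYMAP["undefined"] (an
# int) for keys it cannot classify, so guard before calling ord().
char = get_character()
if char == KEYMAP["undefined"]:
    print("unrecognized key")
elif ord(char) == KEYMAP["up"]:
    print("arrow up")
elif char == chr(KEYMAP["interrupt"]):
    print("ctrl-c")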
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 53 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    """simple docstring"""

    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'torchsde'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'torchsde'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'torchsde'])
| 366 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
| 289 | 0 |
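# The alternation in the first group covers local and international prefixes;
# quick checks against the pattern above:
assert is_sri_lankan_phone_number("+94773283048")
assert is_sri_lankan_phone_number("0718382399")
assert not is_sri_lankan_phone_number("0093702343221")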
def join(separator: str, separated: list) -> str:
    """Joins the given strings with the separator; raises on non-string items."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 43 |
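# Behavior notes for join() above: each iteration appends a trailing separator,
# and the final strip() removes it, so no trailing dash survives.
assert join("-", ["a", "b", "c"]) == "a-b-c"
assert join(" ", ["You", "are", "amazing!"]) == "You are amazing!"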
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_ignore_keys_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids, pixel_values=encoding_1.pixel_values, pixel_values_2=encoding_2.pixel_values, )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 319 | 0 |
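# Tiny illustration of the pop-and-reassign renaming the conversion relies on:
state_dict = {"transformer.norm.weight": 1.0}
rename_key(state_dict, "transformer.norm.weight", "vilt.layernorm.weight")
assert state_dict == {"vilt.layernorm.weight": 1.0}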
import string
def decrypt(message: str) -> None:
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F'Decryption using Key #{key}: {translated}')


def main() -> None:
    message = input("""Encrypted message: """)
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 125 |
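# Brute-forcing a ROT13 ciphertext (a made-up example): the Key #13 line of
# the printed output reads THIS IS MY SECRET MESSAGE.
decrypt("GUVF VF ZL FRPERG ZRFFNTR")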
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file, """w""") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, """w""") as fp:
            fp.write("""\n""".join(merges))
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: Dict ):
'''simple docstring'''
return "lower newer", "lower newer"
def UpperCamelCase ( self: Any ):
'''simple docstring'''
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)
        text = """lower"""
        bpe_tokens = ["""low""", """er</w>"""]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["""<unk>"""]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = """This is a simple input"""
                s2 = ["""This is a simple input 1""", """This is a simple input 2"""]
                p = ("""This is a simple input""", """This is a pair""")
                p2 = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="""max_length""")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="""max_length""")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="""max_length""", )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="""max_length""")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="""max_length""")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="""max_length""", )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
pass
| 125 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3 |
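# Minimal stand-in for the _LazyModule idea using PEP 562 module-level
# __getattr__ (not the transformers implementation; it must live inside a
# package for the relative import to resolve): attribute access, not import,
# loads the heavy submodule.
import importlib

_lazy_imports = {"configuration_bigbird_pegasus": ["BigBirdPegasusConfig"]}


def __getattr__(name):
    for module, names in _lazy_imports.items():
        if name in names:
            return getattr(importlib.import_module(f".{module}", __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")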
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ) -> List[str]:
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFMBartForConditionalGeneration,
            '''feature-extraction''': TFMBartModel,
            '''summarization''': TFMBartForConditionalGeneration,
            '''text2text-generation''': TFMBartForConditionalGeneration,
            '''translation''': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)
def _lowercase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
    ]
    expected_text = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
    ]
    model_name = '''facebook/mbart-large-en-ro'''
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors='''tf''')
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2)
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words
@slow
def _lowercase ( self ):
'''simple docstring'''
self._assert_generated_batch_equal_expected()
| 273 | 0 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"


class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()
        data_cached = cached_path(
            """https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""", extract_compressed_file=True, )
        self.data_dir = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
@slow
@require_torch_gpu
def _lowerCamelCase ( self ):
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            """$MAX_LEN""": 64,
            """$BS""": 64,
            """$GAS""": 1,
            """$ENRO_DIR""": self.data_dir,
            """facebook/mbart-large-cc25""": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            """--learning_rate=3e-5""": """--learning_rate 3e-4""",
            """--num_train_epochs 6""": """--num_train_epochs 1""",
        }
        # Clean up bash script
        bash_script = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""")[1].strip()
        bash_script = bash_script.replace("""\\\n""", """""").strip().replace("""\"$@\"""", """""")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["""finetune.py"""] + bash_script.split() + args
        with patch.object(sys, """argv""", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)
        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["""val"""][0]
        last_step_stats = metrics["""val"""][-1]
        self.assertEqual(len(metrics["""val"""]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""], float)
        self.assertGreater(last_step_stats["""val_avg_gen_time"""], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["""val_avg_gen_time"""], 1.0)
        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["""val_avg_bleu"""], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""]), 1.1)
        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(""".ckpt""")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="""cpu""")
        expected_key = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["""test"""]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
def _lowerCamelCase ( self ):
UpperCamelCase__ = f"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
UpperCamelCase__ = {
"""--fp16_opt_level=O1""": """""",
"""$MAX_LEN""": 128,
"""$BS""": 16,
"""$GAS""": 1,
"""$ENRO_DIR""": data_dir,
"""$m""": """sshleifer/student_marian_en_ro_6_1""",
"""val_check_interval=0.25""": """val_check_interval=1.0""",
}
# Clean up bash script
UpperCamelCase__ = (
(self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip()
)
UpperCamelCase__ = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
UpperCamelCase__ = bash_script.replace("""--fp16 """ , """ """ )
for k, v in env_vars_to_replace.items():
UpperCamelCase__ = bash_script.replace(__lowerCAmelCase , str(__lowerCAmelCase ) )
UpperCamelCase__ = self.get_auto_remove_tmp_dir()
UpperCamelCase__ = bash_script.replace("""--fp16""" , """""" )
UpperCamelCase__ = 6
UpperCamelCase__ = (
["""distillation.py"""]
+ bash_script.split()
+ [
f"""--output_dir={output_dir}""",
"""--gpus=1""",
"""--learning_rate=1e-3""",
f"""--num_train_epochs={epochs}""",
"""--warmup_steps=10""",
"""--val_check_interval=1.0""",
"""--do_predict""",
]
)
with patch.object(__lowerCAmelCase , """argv""" , __lowerCAmelCase ):
UpperCamelCase__ = argparse.ArgumentParser()
UpperCamelCase__ = pl.Trainer.add_argparse_args(__lowerCAmelCase )
UpperCamelCase__ = SummarizationDistiller.add_model_specific_args(__lowerCAmelCase , os.getcwd() )
UpperCamelCase__ = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
UpperCamelCase__ = distill_main(__lowerCAmelCase )
# Check metrics
UpperCamelCase__ = load_json(model.metrics_save_path )
UpperCamelCase__ = metrics["""val"""][0]
UpperCamelCase__ = metrics["""val"""][-1]
assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # fails if the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # fails if the model hangs on generate, e.g. a bad saved config
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , __lowerCAmelCase )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCamelCase__ = os.listdir(__lowerCAmelCase )
UpperCamelCase__ = [x for x in contents if x.endswith(""".ckpt""" )][0]
UpperCamelCase__ = os.path.join(args.output_dir , __lowerCAmelCase )
UpperCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
UpperCamelCase__ = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
UpperCamelCase__ = {os.path.basename(__lowerCAmelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
| 368 |
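Both tests above drive an argparse-based training entry point by temporarily swapping sys.argv through unittest.mock.patch.object. A minimal, self-contained sketch of that pattern (the entry point and the --epochs flag below are illustrative, not taken from the scripts under test):

import argparse
import sys
from unittest.mock import patch

def main() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument("--epochs", type=int, default=1)
    return parser.parse_args()

testargs = ["prog.py", "--epochs", "3"]  # argv[0] is the program name
with patch.object(sys, "argv", testargs):
    args = main()
assert args.epochs == 3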
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase__ = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    model_type = """masked_bert"""
def __init__( self , __lowerCAmelCase=30522 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=0 , __lowerCAmelCase="topK" , __lowerCAmelCase="constant" , __lowerCAmelCase=0.0 , **__lowerCAmelCase , ):
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = pruning_method
UpperCamelCase__ = mask_init
UpperCamelCase__ = mask_scale
| 87 | 0 |
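A hedged usage sketch for a PretrainedConfig subclass like the one above: attributes set in __init__ round-trip through to_dict/from_dict, the mechanism that save_pretrained/from_pretrained build on. TinyConfig is a made-up example class, not part of transformers:

from transformers.configuration_utils import PretrainedConfig

class TinyConfig(PretrainedConfig):
    model_type = "tiny"

    def __init__(self, hidden_size=64, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size

cfg = TinyConfig(hidden_size=128)
d = cfg.to_dict()  # plain dict, JSON-serializable
assert d["hidden_size"] == 128
restored = TinyConfig.from_dict(d)
assert restored.hidden_size == 128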
from __future__ import annotations
import numpy as np
def relu ( snake_case ):
"""simple docstring"""
return np.maximum(0 , snake_case )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 303 |
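The matching backward pass is just as short; a sketch of ReLU's subgradient (choosing 0 at x == 0), complementing the forward pass above:

import numpy as np

def relu_grad(x: np.ndarray) -> np.ndarray:
    # Subgradient of max(0, x): 1 where x > 0, else 0.
    return (x > 0).astype(x.dtype)

print(relu_grad(np.array([-1.0, 0.0, 5.0])))  # --> [0. 0. 1.]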
def solution ( snake_case = 1_000_000 ):
    """simple docstring"""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2 , snake_case ):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 303 | 1 |
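The hand-rolled counters dict above is classic memoization; an equivalent sketch using functools.lru_cache (same recurrence, cache managed by the standard library):

from functools import lru_cache

@lru_cache(maxsize=None)
def chain_length(n: int) -> int:
    if n == 1:
        return 1
    return 1 + chain_length(n // 2 if n % 2 == 0 else 3 * n + 1)

print(chain_length(27))  # --> 112, the famously long chain for 27
print(max(range(1, 100_000), key=chain_length))  # longest chain start below 100,000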
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( SchedulerCommonTest ):
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10
def lowerCamelCase_ ( self , **__UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = {
"""num_train_timesteps""": 1_1_0_0,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**__UpperCamelCase )
return config
def lowerCamelCase_ ( self ):
"""simple docstring"""
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__UpperCamelCase , beta_end=__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config(prediction_type="""v_prediction""" )
UpperCamelCase_ = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
UpperCamelCase_ = self.dummy_model()
UpperCamelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCamelCase_ = sample.to(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase_ = scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ = model(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ = output.prev_sample
UpperCamelCase_ = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCamelCase_ = torch.mean(torch.abs(__UpperCamelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.69_34e-07 ) < 1e-2
assert abs(result_mean.item() - 6.11_12e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0_002 ) < 1e-3
def lowerCamelCase_ ( self ):
"""simple docstring"""
if torch_device == "mps":
return
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
UpperCamelCase_ = self.dummy_model()
UpperCamelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCamelCase_ = sample.to(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase_ = scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ = model(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ = output.prev_sample
UpperCamelCase_ = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCamelCase_ = torch.mean(torch.abs(__UpperCamelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
def lowerCamelCase_ ( self ):
"""simple docstring"""
if torch_device == "mps":
return
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=__UpperCamelCase )
UpperCamelCase_ = self.dummy_model()
UpperCamelCase_ = self.dummy_sample_deter.to(__UpperCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
UpperCamelCase_ = scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ = model(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ = output.prev_sample
UpperCamelCase_ = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCamelCase_ = torch.mean(torch.abs(__UpperCamelCase ) )
if str(__UpperCamelCase ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
| 261 |
import re
def is_sri_lankan_phone_number ( phone : str ) -> bool:
    pattern = re.compile(
        r"""^(?:0|94|\+94|0{2}94)""" r"""7(0|1|2|4|5|6|7|8)""" r"""(-| |)""" r"""\d{7}$""" )
    return bool(re.search(pattern , phone ) )
if __name__ == "__main__":
    phone = '''0094702343221'''
print(is_sri_lankan_phone_number(phone))
| 261 | 1 |
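A few self-contained spot-checks for the pattern above; the expected results follow from the regex itself (prefix 0/94/+94/0094, then 7, then a second digit other than 3 or 9, an optional separator, and seven more digits):

import re

PATTERN = re.compile(
    r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )

checks = [
    ("0094702343221", True),   # 0094 international prefix (value from the demo above)
    ("+94713427584", True),    # +94 prefix, separator omitted
    ("0112345678", False),     # no leading 7 after the prefix
    ("0793434545", False),     # 9 is not an accepted second digit
]
for number, expected in checks:
    assert bool(PATTERN.search(number)) is expected, number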
def reverse_long_words ( sentence : str ) -> str:
    """simple docstring"""
    return " ".join(
        word[::-1] if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 322 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( task ,reset_position_index_per_cell ,tf_checkpoint_path ,tapas_config_file ,pytorch_dump_path ):
"""simple docstring"""
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
_UpperCAmelCase = TapasConfig.from_json_file(lowercase )
# set absolute/relative position embeddings parameter
_UpperCAmelCase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_UpperCAmelCase = TapasForQuestionAnswering(config=lowercase )
elif task == "WTQ":
# run_task_main.py hparams
_UpperCAmelCase = 4
_UpperCAmelCase = True
# hparam_utils.py hparams
_UpperCAmelCase = 0.66_46_94
_UpperCAmelCase = 0.20_79_51
_UpperCAmelCase = 0.12_11_94
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = 0.0_35_25_13
_UpperCAmelCase = TapasForQuestionAnswering(config=lowercase )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_UpperCAmelCase = 4
_UpperCAmelCase = False
# hparam_utils.py hparams
_UpperCAmelCase = 36.45_19
_UpperCAmelCase = 0.90_34_21
_UpperCAmelCase = 2_22.0_88
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = 0.76_31_41
_UpperCAmelCase = TapasForQuestionAnswering(config=lowercase )
elif task == "TABFACT":
_UpperCAmelCase = TapasForSequenceClassification(config=lowercase )
elif task == "MLM":
_UpperCAmelCase = TapasForMaskedLM(config=lowercase )
elif task == "INTERMEDIATE_PRETRAINING":
_UpperCAmelCase = TapasModel(config=lowercase )
else:
raise ValueError(f'''Task {task} not supported.''' )
print(f'''Building PyTorch model from configuration: {config}''' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowercase ,lowercase ,lowercase )
# Save pytorch-model (weights and configuration)
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowercase )
# Save tokenizer files
print(f'''Save tokenizer files to {pytorch_dump_path}''' )
_UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" ,model_max_length=5_12 )
tokenizer.save_pretrained(lowercase )
print("""Used relative position embeddings:""" ,model.config.reset_position_index_per_cell )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 289 | 0 |
'''simple docstring'''
from __future__ import annotations
def ohms_law ( voltage : float , current : float , resistance : float ):
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 61 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 61 | 1 |
'''simple docstring'''
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def fetch_bbc_news ( bbc_news_api_key : str ) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['''articles'''], 1 ):
print(F"""{i}.) {article["title"]}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 125 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case_ : Union[str, Any] = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
snake_case_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 125 | 1 |
'''simple docstring'''
def solution ( pence = 200 ) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_0_0) == 7_3_6_8_2
| 46 |
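Because the coin loop is outermost, each amount is extended by coins in a fixed order, so the DP counts coin multisets (combinations) rather than ordered sequences; swapping the loops would count permutations instead. A self-contained restatement with a tiny sanity check alongside the value asserted above:

def count_combinations(coins, target):
    ways = [0] * (target + 1)
    ways[0] = 1
    for coin in coins:                          # coin loop outermost -> combinations
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]

assert count_combinations([1, 2], 4) == 3  # {1,1,1,1}, {1,1,2}, {2,2}
assert count_combinations([1, 2, 5, 10, 20, 50, 100, 200], 200) == 73682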
'''simple docstring'''
import random
def rabin_miller ( num ) -> bool:
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num ( num ) -> bool:
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(lowercase )
def generate_large_prime ( keysize = 1024 ) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(num ):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 46 | 1 |
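The core primitive in rabin_miller above is Python's three-argument pow, which performs modular exponentiation efficiently. A sketch of a single witness round, factored out for clarity (the helper name is illustrative, not from the snippet):

import random

def miller_rabin_round(num: int, a: int) -> bool:
    """One witness round: returns False when `a` proves num composite."""
    s, t = num - 1, 0
    while s % 2 == 0:
        s //= 2
        t += 1
    v = pow(a, s, num)  # built-in three-argument pow is modular exponentiation
    if v in (1, num - 1):
        return True
    for _ in range(t - 1):
        v = pow(v, 2, num)
        if v == num - 1:
            return True
    return False

assert miller_rabin_round(97, random.randrange(2, 96))           # primes pass every round
assert not all(miller_rabin_round(91, a) for a in range(2, 90))  # 91 == 7 * 13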
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def lowercase ( __snake_case : int , __snake_case : str , __snake_case : str=None , __snake_case : int=None , __snake_case : str=None , __snake_case : Optional[Any]=None , __snake_case : str=None , __snake_case : str=None , ):
if attention_mask is None:
lowercase_ : Dict = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowercase_ : Optional[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowercase_ : str = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase_ : Dict = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowercase_ : int = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class _UpperCAmelCase :
def __init__( self : Dict , A : Any , A : List[str]=13 , A : List[Any]=7 , A : int=True , A : Any=False , A : int=99 , A : Tuple=16 , A : Optional[Any]=2 , A : Union[str, Any]=4 , A : Dict=4 , A : Optional[Any]="gelu" , A : Optional[int]=0.1 , A : Tuple=0.1 , A : Optional[Any]=32 , A : Tuple=2 , A : Any=1 , A : List[str]=0 , A : Union[str, Any]=0.02 , ) -> Dict:
lowercase_ : Optional[int] = parent
lowercase_ : str = batch_size
lowercase_ : str = seq_length
lowercase_ : Tuple = is_training
lowercase_ : Optional[Any] = use_labels
lowercase_ : List[Any] = vocab_size
lowercase_ : str = hidden_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : Dict = intermediate_size
lowercase_ : Tuple = hidden_act
lowercase_ : List[str] = hidden_dropout_prob
lowercase_ : Union[str, Any] = attention_probs_dropout_prob
lowercase_ : List[Any] = max_position_embeddings
lowercase_ : Optional[int] = eos_token_id
lowercase_ : List[Any] = pad_token_id
lowercase_ : Union[str, Any] = bos_token_id
lowercase_ : Optional[int] = initializer_range
def A ( self : Optional[Any] ) -> Tuple:
lowercase_ : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
lowercase_ : Tuple = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
lowercase_ : Tuple = shift_tokens_right(lowercase_ , 1 , 2 )
lowercase_ : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowercase_ , )
lowercase_ : List[Any] = prepare_blenderbot_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
return config, inputs_dict
def A ( self : Tuple ) -> int:
lowercase_ : List[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def A ( self : Any , A : List[Any] , A : Any , A : List[Any] ) -> List[Any]:
lowercase_ : Union[str, Any] = 20
lowercase_ : List[Any] = model_class_name(lowercase_ )
lowercase_ : Optional[int] = model.encode(inputs_dict['''input_ids'''] )
lowercase_ : Optional[int] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowercase_ : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , lowercase_ , lowercase_ )
lowercase_ : Tuple = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowercase_ : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ : int = model.decode(
decoder_input_ids[:, :-1] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=lowercase_ , decoder_position_ids=lowercase_ , )
lowercase_ : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowercase_ : Dict = model.decode(
decoder_input_ids[:, -1:] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase_ , )
lowercase_ : Union[str, Any] = model.decode(lowercase_ , lowercase_ )
lowercase_ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
def A ( self : Any , A : Union[str, Any] , A : Dict , A : Any ) -> Union[str, Any]:
lowercase_ : Optional[Any] = 20
lowercase_ : List[str] = model_class_name(lowercase_ )
lowercase_ : Any = model.encode(inputs_dict['''input_ids'''] )
lowercase_ : List[str] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowercase_ : Tuple = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowercase_ : List[str] = model.init_cache(decoder_input_ids.shape[0] , lowercase_ , lowercase_ )
lowercase_ : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ : str = model.decode(
decoder_input_ids[:, :-1] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=lowercase_ , decoder_position_ids=lowercase_ , )
lowercase_ : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowercase_ : int = model.decode(
decoder_input_ids[:, -1:] , lowercase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase_ , decoder_position_ids=lowercase_ , )
lowercase_ : int = model.decode(lowercase_ , lowercase_ , decoder_attention_mask=lowercase_ )
lowercase_ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
    vocab_size = 99
def A ( self : Any ) -> Tuple:
lowercase_ : Tuple = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
lowercase_ : Tuple = input_ids.shape[0]
lowercase_ : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def A ( self : str ) -> Tuple:
lowercase_ : Optional[int] = self._get_config_and_data()
lowercase_ : Tuple = FlaxBlenderbotSmallForConditionalGeneration(lowercase_ )
lowercase_ : List[str] = lm_model(input_ids=lowercase_ )
lowercase_ : int = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , lowercase_ )
def A ( self : Optional[Any] ) -> str:
lowercase_ : List[Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
lowercase_ : Dict = FlaxBlenderbotSmallForConditionalGeneration(lowercase_ )
lowercase_ : Optional[int] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
lowercase_ : Union[str, Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
lowercase_ : Union[str, Any] = lm_model(input_ids=lowercase_ , decoder_input_ids=lowercase_ )
lowercase_ : Optional[int] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , lowercase_ )
def A ( self : List[Any] ) -> str:
lowercase_ : List[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
lowercase_ : int = shift_tokens_right(lowercase_ , 1 , 2 )
lowercase_ : Optional[int] = np.equal(lowercase_ , 1 ).astype(np.floataa ).sum()
lowercase_ : Any = np.equal(lowercase_ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowercase_ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class _UpperCAmelCase ( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def A ( self : Any ) -> List[str]:
lowercase_ : Dict = FlaxBlenderbotSmallModelTester(self )
def A ( self : str ) -> List[str]:
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase_ , lowercase_ , lowercase_ )
def A ( self : Any ) -> List[str]:
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase_ , lowercase_ , lowercase_ )
def A ( self : Tuple ) -> List[str]:
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase_ : Optional[int] = self._prepare_for_class(lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = model_class(lowercase_ )
@jax.jit
def encode_jitted(A : Any , A : List[Any]=None , **A : Dict ):
return model.encode(input_ids=lowercase_ , attention_mask=lowercase_ )
with self.subTest('''JIT Enabled''' ):
lowercase_ : List[str] = encode_jitted(**lowercase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowercase_ : Any = encode_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def A ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase_ : int = model_class(lowercase_ )
lowercase_ : List[str] = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowercase_ : int = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(A : Any , A : int , A : Optional[int] ):
return model.decode(
decoder_input_ids=lowercase_ , decoder_attention_mask=lowercase_ , encoder_outputs=lowercase_ , )
with self.subTest('''JIT Enabled''' ):
lowercase_ : Any = decode_jitted(**lowercase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowercase_ : Tuple = decode_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def A ( self : Dict ) -> int:
for model_class_name in self.all_model_classes:
lowercase_ : List[Any] = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowercase_ : Optional[Any] = np.ones((1, 1) ) * model.config.eos_token_id
lowercase_ : str = model(lowercase_ )
self.assertIsNotNone(lowercase_ )
| 33 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( TokenizerTesterMixin ,unittest.TestCase ):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
super().setUp()
lowercase__ : List[str] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ : Optional[int] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : Tuple = {"unk_token": "<unk>"}
lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
def __UpperCamelCase ( self : int , **lowercase_ : str ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def __UpperCamelCase ( self : List[Any] , **lowercase_ : Any ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : Any ) -> Tuple:
return "lower newer", "lower newer"
@cached_property
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def __UpperCamelCase ( self : Tuple ) -> int:
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def __UpperCamelCase ( self : int ) -> List[Any]:
lowercase__ : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ : str = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Dict = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors="pt" )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowercase__ : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(lowercase_ , lowercase_ )
@require_torch
def __UpperCamelCase ( self : List[str] ) -> Tuple:
lowercase__ : Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Optional[int] = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="pt" )
self.assertIn("input_ids" , lowercase_ )
self.assertIn("attention_mask" , lowercase_ )
self.assertNotIn("labels" , lowercase_ )
self.assertNotIn("decoder_attention_mask" , lowercase_ )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
lowercase__ : Dict = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Dict = tokenizer(text_target=lowercase_ , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : int = tokenizer(
["I am a small frog" * 10_24, "I am a small frog"] , padding=lowercase_ , truncation=lowercase_ , return_tensors="pt" )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def __UpperCamelCase ( self : List[str] ) -> Any:
lowercase__ : Union[str, Any] = ["A long paragraph for summarization."]
lowercase__ : List[Any] = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : List[Any] = tokenizer(lowercase_ , return_tensors="pt" )
lowercase__ : Dict = tokenizer(text_target=lowercase_ , return_tensors="pt" )
lowercase__ : Optional[int] = inputs["input_ids"]
lowercase__ : str = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : int = ["Summary of the text.", "Another summary."]
lowercase__ : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowercase__ : Tuple = tokenizer(lowercase_ , padding=lowercase_ )
lowercase__ : int = [[0] * len(lowercase_ ) for x in encoded_output["input_ids"]]
lowercase__ : Any = tokenizer.pad(lowercase_ )
self.assertSequenceEqual(outputs["global_attention_mask"] , lowercase_ )
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
pass
def __UpperCamelCase ( self : int ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase__ : List[str] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase__ : List[Any] = "A, <mask> AllenNLP sentence."
lowercase__ : Tuple = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
lowercase__ : List[str] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
lowercase__ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 87 | 0 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(1_0000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f'''https://google.com{link.get('href')}''')
| 249 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase , ToolTesterMixin ):
"""simple docstring"""
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = load_tool('text-to-speech' )
self.tool.setup()
def A ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = self.tool('hey' )
UpperCamelCase = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
def A ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = self.tool('hey' )
UpperCamelCase = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
| 249 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:List[str] = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class snake_case__ ( PretrainedConfig ):
    model_type = """gptsan-japanese"""
    keys_to_ignore_at_inference = [
        """past_key_values""",
    ]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
def __init__( self , lowerCamelCase=36000 , lowerCamelCase=1280 , lowerCamelCase=1024 , lowerCamelCase=8192 , lowerCamelCase=4096 , lowerCamelCase=128 , lowerCamelCase=10 , lowerCamelCase=0 , lowerCamelCase=16 , lowerCamelCase=16 , lowerCamelCase=128 , lowerCamelCase=0.0 , lowerCamelCase=1E-5 , lowerCamelCase=False , lowerCamelCase=0.0 , lowerCamelCase="float32" , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=0.002 , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=35998 , lowerCamelCase=35995 , lowerCamelCase=35999 , **lowerCamelCase , ):
__a = vocab_size
__a = max_position_embeddings
__a = d_model
__a = d_ff
__a = d_ext
__a = d_spout
__a = num_switch_layers
__a = num_ext_layers
__a = num_switch_layers + num_ext_layers
__a = num_heads
__a = num_experts
__a = expert_capacity
__a = dropout_rate
__a = layer_norm_epsilon
__a = router_bias
__a = router_jitter_noise
__a = router_dtype
__a = router_ignore_padding_tokens
__a = output_hidden_states
__a = output_attentions
__a = initializer_factor
__a = output_router_logits
__a = use_cache
super().__init__(
separator_token_id=lowerCamelCase , pad_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase , )
| 261 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
SCREAMING_SNAKE_CASE__:Any = logging.getLogger(__name__)
def git_log( a ):
__a = git.Repo(search_parent_directories=a )
__a = {
"repo_id": str(a ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
}
with open(os.path.join(a , "git_log.json" ) , "w" ) as f:
json.dump(a , a , indent=4 )
def init_gpu_params( params ):
if params.n_gpu <= 0:
__a = 0
__a = -1
__a = True
__a = False
return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
assert params.local_rank != -1
__a = int(os.environ["WORLD_SIZE"] )
__a = int(os.environ["N_GPU_NODE"] )
__a = int(os.environ["RANK"] )
# number of nodes / node ID
__a = params.world_size // params.n_gpu_per_node
__a = params.global_rank // params.n_gpu_per_node
__a = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
__a = 1
__a = 0
__a = 0
__a = 0
__a = 1
__a = 1
__a = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
__a = params.node_id == 0 and params.local_rank == 0
__a = params.n_nodes > 1
# summary
__a = F"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" , backend="nccl" , )
def set_seed( args ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 261 | 1 |
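A minimal demonstration of why the seeding helper above matters: re-seeding reproduces the same random draws (assumes numpy and torch are installed):

import numpy as np
import torch

def demo_set_seed(seed: int) -> None:
    np.random.seed(seed)
    torch.manual_seed(seed)

demo_set_seed(42)
a = torch.rand(3)
demo_set_seed(42)
b = torch.rand(3)
assert torch.equal(a, b)  # identical draws after re-seeding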
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def jacobi_iteration_method ( coefficient_matrix: NDArray[floataa] , constant_matrix: NDArray[floataa] , init_val: list[int] , iterations: int , ) -> list[float]:
    '''simple docstring'''
    rowsa, colsa = coefficient_matrix.shape
    rowsb, colsb = constant_matrix.shape
    if rowsa != colsa:
        msg = F"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
        raise ValueError(msg )
    if colsb != 1:
        msg = F"""Constant matrix must be nx1 but received {rowsb}x{colsb}"""
        raise ValueError(msg )
    if rowsa != rowsb:
        msg = (
            '''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
            F"""received {rowsa}x{colsa} and {rowsb}x{colsb}"""
        )
        raise ValueError(msg )
    if len(init_val ) != rowsa:
        msg = (
            '''Number of initial values must be equal to number of rows in coefficient '''
            F"""matrix but received {len(init_val )} and {rowsa}"""
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError('''Iterations must be at least 1''' )
    table = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant ( table : NDArray[floataa] ) -> bool:
    '''simple docstring'''
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 |
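A worked instance of the Jacobi iteration above on a strictly diagonally dominant 2x2 system, with the per-row update written vectorized in numpy (illustrative values; the direct solve is shown only as a cross-check):

import numpy as np

A = np.array([[4.0, 1.0], [1.0, 3.0]])  # strictly diagonally dominant
b = np.array([1.0, 2.0])
x = np.zeros(2)

for _ in range(25):
    # Jacobi update: x_i <- (b_i - sum_{j != i} A_ij x_j) / A_ii
    x = (b - (A @ x - np.diag(A) * x)) / np.diag(A)

print(np.round(x, 4))         # --> [0.0909 0.6364]
print(np.linalg.solve(A, b))  # direct solve agrees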
from ..utils import DummyObject, requires_backends
class A_ ( metaclass=DummyObject ):
lowerCAmelCase__ = ["""speech"""]
def __init__(self :Optional[int] , *_UpperCamelCase :int , **_UpperCamelCase :List[str] )-> List[str]:
requires_backends(self , ['''speech'''] )
class A_ ( metaclass=DummyObject ):
lowerCAmelCase__ = ["""speech"""]
def __init__(self :Optional[int] , *_UpperCamelCase :str , **_UpperCamelCase :List[str] )-> Union[str, Any]:
requires_backends(self , ['''speech'''] )
| 250 | 0 |
def calc_profit ( profit , weight , max_weight ):
    if len(profit ) != len(weight ):
        raise ValueError('The length of profit and weight must be same.' )
    if max_weight <= 0:
        raise ValueError('max_weight must greater than zero.' )
    if any(p < 0 for p in profit ):
        raise ValueError('Profit can not be negative.' )
    if any(w < 0 for w in weight ):
        raise ValueError('Weight can not be negative.' )
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit , weight )]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight )
    # declaring useful variables
    length = len(sorted_profit_by_weight )
    i = 0
    limit = 0
    gain = 0
    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight )
        profit_by_weight[index] = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
            # Add the full profit for this item, since
            # weight[index] / weight[index] == 1.
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
UpperCAmelCase = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
UpperCAmelCase = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
UpperCAmelCase = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 195 |
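A compact greedy restatement of calc_profit on the textbook fractional-knapsack instance (values 60/100/120, weights 10/20/30, capacity 50), where the optimum is 240:

def fractional_knapsack(profit, weight, max_weight):
    # Sort by profit density, take whole items while they fit, then a fraction.
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    gain = 0.0
    for p, w in items:
        if max_weight >= w:
            max_weight -= w
            gain += p
        else:
            gain += p * max_weight / w
            break
    return gain

assert fractional_knapsack([60, 100, 120], [10, 20, 30], 50) == 240.0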
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
snake_case__ : str = None
snake_case__ : Optional[Any] = logging.get_logger(__name__)
snake_case__ : Optional[int] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case__ : Dict = {
'''vocab_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''',
},
}
snake_case__ : Any = {
'''google/fnet-base''': 512,
'''google/fnet-large''': 512,
}
snake_case__ : Dict = '''▁'''
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''token_type_ids''']
__UpperCamelCase = FNetTokenizer
def __init__( self : Union[str, Any] , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Any=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : List[str]="[SEP]" , UpperCamelCase_ : List[Any]="<pad>" , UpperCamelCase_ : Union[str, Any]="[CLS]" , UpperCamelCase_ : int="[MASK]" , **UpperCamelCase_ : Optional[Any] , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
lowerCAmelCase : int = (
AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ , normalized=UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ )
else mask_token
)
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , remove_space=UpperCamelCase_ , keep_accents=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCAmelCase : Optional[int] = do_lower_case
lowerCAmelCase : str = remove_space
lowerCAmelCase : Any = keep_accents
lowerCAmelCase : int = vocab_file
lowerCAmelCase : List[str] = False if not self.vocab_file else True
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : Optional[int] = [self.sep_token_id]
lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : List[str] = [self.sep_token_id]
lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : str = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.vocab_file , UpperCamelCase_ )
return (out_vocab_file,)
| 60 | 0 |
from __future__ import annotations
def generate_sum_of_subsets_soln ( nums : list[int] , max_sum : int ) -> list[list[int]]:
    result = []
    path = []
    num_index = 0
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result
def create_state_space_tree ( nums : list[int] , max_sum : int , num_index : int , path : list[int] , result : list[list[int]] , remaining_nums_sum : int , ) -> None:
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 360 |
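A brute-force cross-check for the backtracking search above, using itertools.combinations (exponential, so suitable only for small inputs like this one):

from itertools import combinations

def subsets_with_sum(nums, target):
    return [
        list(combo)
        for r in range(1, len(nums) + 1)
        for combo in combinations(nums, r)
        if sum(combo) == target
    ]

print(subsets_with_sum([3, 34, 4, 12, 5, 2], 9))  # --> [[4, 5], [3, 4, 2]]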
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation ( img : np.ndarray , pta : np.ndarray , ptb : np.ndarray , rows : int , cols : int ) -> np.ndarray:
    rotation_matrix = cva.getAffineTransform(pta , ptb )
    return cva.warpAffine(img , rotation_matrix , (rows, cols) )
if __name__ == "__main__":
# read original image
_lowerCamelCase : Optional[Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
_lowerCamelCase : List[str] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
_lowerCamelCase , _lowerCamelCase : List[Any] = gray_img.shape
# set different points to rotate image
_lowerCamelCase : str = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
_lowerCamelCase : Optional[Any] = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
_lowerCamelCase : List[str] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
_lowerCamelCase : Dict = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
_lowerCamelCase : int = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
_lowerCamelCase : Any = plt.figure(1)
_lowerCamelCase : List[Any] = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.0_5, right=1.0, top=0.9_5)
plt.show() | 130 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'summarization'
_SCREAMING_SNAKE_CASE = ['loss']
_SCREAMING_SNAKE_CASE = ROUGE_KEYS
_SCREAMING_SNAKE_CASE = 'rouge2'
def __init__( self , lowercase , **lowercase ) -> str:
if hparams.sortish_sampler and hparams.gpus > 1:
lowerCAmelCase = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
lowerCAmelCase = Path(self.output_dir ) / """metrics.json"""
lowerCAmelCase = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
lowerCAmelCase = 0
lowerCAmelCase = defaultdict(lowercase )
lowerCAmelCase = self.config.model_type
lowerCAmelCase = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
lowerCAmelCase = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
lowerCAmelCase = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
lowerCAmelCase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
lowerCAmelCase = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
lowerCAmelCase = get_git_info()["""repo_sha"""]
lowerCAmelCase = hparams.num_workers
lowerCAmelCase = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ):
lowerCAmelCase = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
lowerCAmelCase = self.decoder_start_token_id
lowerCAmelCase = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
lowerCAmelCase = False
lowerCAmelCase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
lowerCAmelCase = self.hparams.eval_max_gen_length
else:
lowerCAmelCase = self.model.config.max_length
lowerCAmelCase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def _snake_case ( self , lowercase ) -> Dict[str, List[str]]:
lowerCAmelCase = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(lowercase , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
lowerCAmelCase = True
return readable_batch
def _snake_case ( self , lowercase , **lowercase ) -> Union[str, Any]:
return self.model(lowercase , **lowercase )
def _snake_case ( self , lowercase ) -> Union[str, Any]:
lowerCAmelCase = self.tokenizer.batch_decode(
lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
return lmap(str.strip , lowercase )
def _snake_case ( self , lowercase ) -> Tuple:
lowerCAmelCase = self.tokenizer.pad_token_id
lowerCAmelCase , lowerCAmelCase = batch["""input_ids"""], batch["""attention_mask"""]
lowerCAmelCase = batch["""labels"""]
if isinstance(self.model , lowercase ):
lowerCAmelCase = self.model._shift_right(lowercase )
else:
lowerCAmelCase = shift_tokens_right(lowercase , lowercase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
lowerCAmelCase = decoder_input_ids
self.save_readable_batch(lowercase )
lowerCAmelCase = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase )
lowerCAmelCase = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
lowerCAmelCase = nn.CrossEntropyLoss(ignore_index=lowercase )
assert lm_logits.shape[-1] == self.vocab_size
lowerCAmelCase = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
lowerCAmelCase = nn.functional.log_softmax(lowercase , dim=-1 )
lowerCAmelCase , lowerCAmelCase = label_smoothed_nll_loss(
lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase )
return (loss,)
@property
def _snake_case ( self ) -> int:
return self.tokenizer.pad_token_id
def _snake_case ( self , lowercase , lowercase ) -> Dict:
lowerCAmelCase = self._step(lowercase )
lowerCAmelCase = dict(zip(self.loss_names , lowercase ) )
# tokens per batch
lowerCAmelCase = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
lowerCAmelCase = batch["""input_ids"""].shape[0]
lowerCAmelCase = batch["""input_ids"""].eq(self.pad ).sum()
lowerCAmelCase = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def _snake_case ( self , lowercase , lowercase ) -> Dict:
return self._generative_step(lowercase )
def _snake_case ( self , lowercase , lowercase="val" ) -> Dict:
self.step_count += 1
lowerCAmelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
lowerCAmelCase = losses["""loss"""]
lowerCAmelCase = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
lowerCAmelCase = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
lowerCAmelCase = torch.tensor(lowercase ).type_as(lowercase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowercase )
lowerCAmelCase = {f'{prefix}_avg_{k}': x for k, x in losses.items()}
lowerCAmelCase = self.step_count
self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path
lowerCAmelCase = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'{prefix}_loss': loss,
f'{prefix}_{self.val_metric}': metric_tensor,
}
def _snake_case ( self , lowercase , lowercase ) -> Dict:
return calculate_rouge(lowercase , lowercase )
def _snake_case ( self , lowercase ) -> dict:
lowerCAmelCase = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
lowerCAmelCase = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
lowerCAmelCase = (time.time() - ta) / batch["""input_ids"""].shape[0]
lowerCAmelCase = self.ids_to_clean_text(lowercase )
lowerCAmelCase = self.ids_to_clean_text(batch["""labels"""] )
lowerCAmelCase = self._step(lowercase )
lowerCAmelCase = dict(zip(self.loss_names , lowercase ) )
lowerCAmelCase = self.calc_generative_metrics(lowercase , lowercase )
lowerCAmelCase = np.mean(lmap(lowercase , lowercase ) )
base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase )
return base_metrics
def _snake_case ( self , lowercase , lowercase ) -> Dict:
return self._generative_step(lowercase )
def _snake_case ( self , lowercase ) -> int:
return self.validation_epoch_end(lowercase , prefix="""test""" )
def _snake_case ( self , lowercase ) -> SeqaSeqDataset:
lowerCAmelCase = self.n_obs[type_path]
lowerCAmelCase = self.target_lens[type_path]
lowerCAmelCase = self.dataset_class(
self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , )
return dataset
def _snake_case ( self , lowercase , lowercase , lowercase = False ) -> DataLoader:
lowerCAmelCase = self.get_dataset(lowercase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
lowerCAmelCase = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
lowerCAmelCase = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
def _snake_case ( self ) -> DataLoader:
lowerCAmelCase = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=lowercase )
return dataloader
def _snake_case ( self ) -> DataLoader:
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def _snake_case ( self ) -> DataLoader:
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def _snake_case ( lowercase , lowercase ) -> Optional[int]:
BaseTransformer.add_model_specific_args(lowercase , lowercase )
add_generic_args(lowercase , lowercase )
parser.add_argument(
"""--max_source_length""" , default=1_024 , type=lowercase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=56 , type=lowercase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=142 , type=lowercase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=142 , type=lowercase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=lowercase )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=lowercase )
parser.add_argument("""--max_tokens_per_batch""" , type=lowercase , default=lowercase )
parser.add_argument("""--logger_name""" , type=lowercase , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=lowercase , default=-1 , required=lowercase , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=lowercase , default=500 , required=lowercase , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=lowercase , default=-1 , required=lowercase , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=lowercase , default="""summarization""" , required=lowercase , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=lowercase , default=0.0 , required=lowercase )
parser.add_argument("""--src_lang""" , type=lowercase , default="""""" , required=lowercase )
parser.add_argument("""--tgt_lang""" , type=lowercase , default="""""" , required=lowercase )
parser.add_argument("""--eval_beams""" , type=lowercase , default=lowercase , required=lowercase )
parser.add_argument(
"""--val_metric""" , type=lowercase , default=lowercase , required=lowercase , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=lowercase , default=lowercase , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=lowercase , default=1 , required=lowercase , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=lowercase , default=-1 , required=lowercase , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'translation'
_SCREAMING_SNAKE_CASE = ['loss']
_SCREAMING_SNAKE_CASE = ['bleu']
_SCREAMING_SNAKE_CASE = 'bleu'
def __init__( self , lowercase , **lowercase ) -> Union[str, Any]:
super().__init__(lowercase , **lowercase )
lowerCAmelCase = hparams.src_lang
lowerCAmelCase = hparams.tgt_lang
def _snake_case ( self , lowercase , lowercase ) -> dict:
return calculate_bleu(lowercase , lowercase )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int=None ):
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
check_output_dir(SCREAMING_SNAKE_CASE , expected_items=3 )
if model is None:
if "summarization" in args.task:
lowerCAmelCase = SummarizationModule(SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase = TranslationModule(SCREAMING_SNAKE_CASE )
lowerCAmelCase = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
lowerCAmelCase = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
lowerCAmelCase = os.environ.get("""WANDB_PROJECT""" , SCREAMING_SNAKE_CASE )
lowerCAmelCase = WandbLogger(name=model.output_dir.name , project=SCREAMING_SNAKE_CASE )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
lowerCAmelCase = WandbLogger(name=model.output_dir.name , project=F'hf_{dataset}' )
if args.early_stopping_patience >= 0:
lowerCAmelCase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
lowerCAmelCase = False
lowerCAmelCase = args.val_metric == """loss"""
lowerCAmelCase = generic_train(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , SCREAMING_SNAKE_CASE ) , early_stopping_callback=SCREAMING_SNAKE_CASE , logger=SCREAMING_SNAKE_CASE , )
pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
lowerCAmelCase = """"""
lowerCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=SCREAMING_SNAKE_CASE ) )
if checkpoints:
lowerCAmelCase = checkpoints[-1]
lowerCAmelCase = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
SCREAMING_SNAKE_CASE__ = pl.Trainer.add_argparse_args(parser)
SCREAMING_SNAKE_CASE__ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
SCREAMING_SNAKE_CASE__ = parser.parse_args()
main(args)
| 46 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
__A : Dict = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
__A : List[str] = 'UperNetConfig'
class _SCREAMING_SNAKE_CASE ( nn.Module):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = 1 , )-> None:
super().__init__()
lowerCamelCase_ =nn.Convad(
in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , kernel_size=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE , dilation=_SCREAMING_SNAKE_CASE , )
lowerCamelCase_ =nn.BatchNormad(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =nn.ReLU()
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> torch.Tensor:
lowerCamelCase_ =self.conv(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self.batch_norm(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self.activation(_SCREAMING_SNAKE_CASE )
return output
class _SCREAMING_SNAKE_CASE ( nn.Module):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> None:
super().__init__()
lowerCamelCase_ =[
nn.AdaptiveAvgPoolad(_SCREAMING_SNAKE_CASE ),
UperNetConvModule(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> torch.Tensor:
lowerCamelCase_ =input
for layer in self.layers:
lowerCamelCase_ =layer(_SCREAMING_SNAKE_CASE )
return hidden_state
class _SCREAMING_SNAKE_CASE ( nn.Module):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> None:
super().__init__()
lowerCamelCase_ =pool_scales
lowerCamelCase_ =align_corners
lowerCamelCase_ =in_channels
lowerCamelCase_ =channels
lowerCamelCase_ =[]
for i, pool_scale in enumerate(_SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =UperNetPyramidPoolingBlock(pool_scale=_SCREAMING_SNAKE_CASE , in_channels=_SCREAMING_SNAKE_CASE , channels=_SCREAMING_SNAKE_CASE )
self.blocks.append(_SCREAMING_SNAKE_CASE )
self.add_module(str(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[torch.Tensor]:
lowerCamelCase_ =[]
for ppm in self.blocks:
lowerCamelCase_ =ppm(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =nn.functional.interpolate(
_SCREAMING_SNAKE_CASE , size=x.size()[2:] , mode="""bilinear""" , align_corners=self.align_corners )
ppm_outs.append(_SCREAMING_SNAKE_CASE )
return ppm_outs
class _SCREAMING_SNAKE_CASE ( nn.Module):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Tuple:
super().__init__()
lowerCamelCase_ =config
lowerCamelCase_ =config.pool_scales # e.g. (1, 2, 3, 6)
lowerCamelCase_ =in_channels
lowerCamelCase_ =config.hidden_size
lowerCamelCase_ =False
lowerCamelCase_ =nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
lowerCamelCase_ =UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
lowerCamelCase_ =UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
lowerCamelCase_ =nn.ModuleList()
lowerCamelCase_ =nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
lowerCamelCase_ =UperNetConvModule(_SCREAMING_SNAKE_CASE , self.channels , kernel_size=1 )
lowerCamelCase_ =UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(_SCREAMING_SNAKE_CASE )
self.fpn_convs.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def _snake_case ( self )-> str:
self.apply(self._init_weights )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Any:
if isinstance(_SCREAMING_SNAKE_CASE , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[str]:
lowerCamelCase_ =inputs[-1]
lowerCamelCase_ =[x]
psp_outs.extend(self.psp_modules(_SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ =torch.cat(_SCREAMING_SNAKE_CASE , dim=1 )
lowerCamelCase_ =self.bottleneck(_SCREAMING_SNAKE_CASE )
return output
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> torch.Tensor:
# build laterals
lowerCamelCase_ =[lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(_SCREAMING_SNAKE_CASE ) )
# build top-down path
lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowerCamelCase_ =laterals[i - 1].shape[2:]
lowerCamelCase_ =laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=_SCREAMING_SNAKE_CASE , mode="""bilinear""" , align_corners=self.align_corners )
# build outputs
lowerCamelCase_ =[self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowerCamelCase_ =nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="""bilinear""" , align_corners=self.align_corners )
lowerCamelCase_ =torch.cat(_SCREAMING_SNAKE_CASE , dim=1 )
lowerCamelCase_ =self.fpn_bottleneck(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self.classifier(_SCREAMING_SNAKE_CASE )
return output
class _SCREAMING_SNAKE_CASE ( nn.Module):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 3 , _SCREAMING_SNAKE_CASE = 1 )-> None:
super().__init__()
lowerCamelCase_ =config
lowerCamelCase_ =config.auxiliary_in_channels
lowerCamelCase_ =config.auxiliary_channels
lowerCamelCase_ =config.auxiliary_num_convs
lowerCamelCase_ =config.auxiliary_concat_input
lowerCamelCase_ =in_index
lowerCamelCase_ =(kernel_size // 2) * dilation
lowerCamelCase_ =[]
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , dilation=_SCREAMING_SNAKE_CASE ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , dilation=_SCREAMING_SNAKE_CASE ) )
if self.num_convs == 0:
lowerCamelCase_ =nn.Identity()
else:
lowerCamelCase_ =nn.Sequential(*_SCREAMING_SNAKE_CASE )
if self.concat_input:
lowerCamelCase_ =UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=_SCREAMING_SNAKE_CASE , padding=kernel_size // 2 )
lowerCamelCase_ =nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def _snake_case ( self )-> Union[str, Any]:
self.apply(self._init_weights )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Optional[int]:
if isinstance(_SCREAMING_SNAKE_CASE , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> torch.Tensor:
# just take the relevant feature maps
lowerCamelCase_ =encoder_hidden_states[self.in_index]
lowerCamelCase_ =self.convs(_SCREAMING_SNAKE_CASE )
if self.concat_input:
lowerCamelCase_ =self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
lowerCamelCase_ =self.classifier(_SCREAMING_SNAKE_CASE )
return output
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:int = UperNetConfig
_UpperCamelCase:List[str] = "pixel_values"
_UpperCamelCase:Optional[Any] = True
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> str:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def _snake_case ( self )-> Any:
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False )-> Dict:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =value
__A : Any = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__A : Tuple = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , lowerCAmelCase__ , )
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
def __init__( self , _SCREAMING_SNAKE_CASE )-> Optional[int]:
super().__init__(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
lowerCamelCase_ =UperNetHead(_SCREAMING_SNAKE_CASE , in_channels=self.backbone.channels )
lowerCamelCase_ =UperNetFCNHead(_SCREAMING_SNAKE_CASE ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) )
@replace_return_docstrings(output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC )
def _snake_case ( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , )-> Union[tuple, SemanticSegmenterOutput]:
lowerCamelCase_ =return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase_ =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase_ =output_attentions if output_attentions is not None else self.config.output_attentions
lowerCamelCase_ =self.backbone.forward_with_filtered_kwargs(
_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =outputs.feature_maps
lowerCamelCase_ =self.decode_head(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =nn.functional.interpolate(_SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =None
if self.auxiliary_head is not None:
lowerCamelCase_ =self.auxiliary_head(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =nn.functional.interpolate(
_SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("""The number of labels should be greater than one""" )
else:
# compute weighted loss
lowerCamelCase_ =CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
lowerCamelCase_ =loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
lowerCamelCase_ =(logits,) + outputs[1:]
else:
lowerCamelCase_ =(logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=_SCREAMING_SNAKE_CASE , logits=_SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 49 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> Optional[int]:
super().__init__(
_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE , streaming=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
lowerCamelCase_ =path_or_paths if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else {self.split: path_or_paths}
lowerCamelCase_ =Text(
cache_dir=_SCREAMING_SNAKE_CASE , data_files=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def _snake_case ( self )-> List[str]:
# Build iterable dataset
if self.streaming:
lowerCamelCase_ =self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
self.builder.download_and_prepare(
download_config=_SCREAMING_SNAKE_CASE , download_mode=_SCREAMING_SNAKE_CASE , verification_mode=_SCREAMING_SNAKE_CASE , base_path=_SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
lowerCamelCase_ =self.builder.as_dataset(
split=self.split , verification_mode=_SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory )
return dataset
| 49 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a_ = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =[
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self , **UpperCamelCase_ ) -> Optional[Any]:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__lowercase : Union[str, Any] = deprecated_arg[3:]
setattr(self , UpperCamelCase_ , not kwargs.pop(UpperCamelCase_ ) )
logger.warning(
F"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
F""" {positive_arg}={kwargs[positive_arg]}""" )
__lowercase : Dict = kwargs.pop('''torchscript''' , self.torchscript )
__lowercase : str = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
__lowercase : str = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**UpperCamelCase_ )
UpperCamelCase =field(default=snake_case , metadata={"help": "Trace the models using torchscript"} )
UpperCamelCase =field(default=snake_case , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
UpperCamelCase =field(
default="O1" , metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
} , )
@cached_property
def _lowerCamelCase ( self ) -> Tuple["torch.device", int]:
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
if not self.cuda:
__lowercase : str = torch.device('''cpu''' )
__lowercase : Optional[Any] = 0
elif is_torch_tpu_available():
__lowercase : str = xm.xla_device()
__lowercase : Any = 0
else:
__lowercase : List[str] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
__lowercase : Any = torch.cuda.device_count()
return device, n_gpu
@property
def _lowerCamelCase ( self ) -> Union[str, Any]:
return is_torch_tpu_available() and self.tpu
@property
def _lowerCamelCase ( self ) -> int:
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def _lowerCamelCase ( self ) -> "torch.device":
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def _lowerCamelCase ( self ) -> int:
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def _lowerCamelCase ( self ) -> Dict:
return self.n_gpu > 0
| 249 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase ="audio-spectrogram-transformer"
def __init__( self , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=16 , UpperCamelCase_=True , UpperCamelCase_=10 , UpperCamelCase_=10 , UpperCamelCase_=10_24 , UpperCamelCase_=1_28 , **UpperCamelCase_ , ) -> Optional[int]:
super().__init__(**UpperCamelCase_ )
__lowercase : Optional[Any] = hidden_size
__lowercase : List[str] = num_hidden_layers
__lowercase : List[str] = num_attention_heads
__lowercase : Dict = intermediate_size
__lowercase : List[str] = hidden_act
__lowercase : Union[str, Any] = hidden_dropout_prob
__lowercase : Optional[Any] = attention_probs_dropout_prob
__lowercase : Dict = initializer_range
__lowercase : Optional[int] = layer_norm_eps
__lowercase : Optional[int] = patch_size
__lowercase : List[str] = qkv_bias
__lowercase : Union[str, Any] = frequency_stride
__lowercase : List[Any] = time_stride
__lowercase : Tuple = max_length
__lowercase : int = num_mel_bins
| 249 | 1 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
snake_case_ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
for attribute in key.split('.' ):
lowercase__ : Optional[Any] = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if weight_type is not None:
lowercase__ : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).shape
else:
lowercase__ : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ : Optional[int] = value
elif weight_type == "weight_g":
lowercase__ : Union[str, Any] = value
elif weight_type == "weight_v":
lowercase__ : Tuple = value
elif weight_type == "bias":
lowercase__ : Any = value
else:
lowercase__ : Union[str, Any] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any ):
'''simple docstring'''
lowercase__ : Optional[int] = []
lowercase__ : Union[str, Any] = fairseq_model.state_dict()
lowercase__ : Optional[int] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Tuple = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[str] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
lowercase__ : Tuple = True
if "*" in mapped_key:
lowercase__ : Any = name.split(SCREAMING_SNAKE_CASE_ )[0].split('.' )[-2]
lowercase__ : Optional[Any] = mapped_key.replace('*' , SCREAMING_SNAKE_CASE_ )
if "weight_g" in name:
lowercase__ : int = 'weight_g'
elif "weight_v" in name:
lowercase__ : Any = 'weight_v'
elif "bias" in name:
lowercase__ : str = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Union[str, Any] = 'weight'
else:
lowercase__ : Union[str, Any] = None
set_recursively(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE_ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
'''simple docstring'''
lowercase__ : List[Any] = full_name.split('conv_layers.' )[-1]
lowercase__ : Dict = name.split('.' )
lowercase__ : List[str] = int(items[0] )
lowercase__ : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ : List[Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ : Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ : str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : int=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ : Any = UniSpeechSatConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
else:
lowercase__ : Dict = UniSpeechSatConfig()
lowercase__ : str = ''
if is_finetuned:
lowercase__ : Any = UniSpeechSatForCTC(SCREAMING_SNAKE_CASE_ )
else:
lowercase__ : Optional[Any] = UniSpeechSatForPreTraining(SCREAMING_SNAKE_CASE_ )
lowercase__ , lowercase__ , lowercase__ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase__ : List[str] = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
snake_case_ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 216 |
import numpy as np
def snake_case__ ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ):
'''simple docstring'''
lowercase__ : List[Any] = int(np.ceil((x_end - xa) / h ) )
lowercase__ : Dict = np.zeros((n + 1,) )
lowercase__ : Dict = ya
lowercase__ : Union[str, Any] = xa
for k in range(SCREAMING_SNAKE_CASE_ ):
lowercase__ : Optional[int] = f(SCREAMING_SNAKE_CASE_ , y[k] )
lowercase__ : Dict = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
lowercase__ : Any = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
lowercase__ : List[Any] = f(x + h , y[k] + h * ka )
lowercase__ : Tuple = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 216 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class lowercase ( lowerCamelCase_ ):
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCamelCase , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(_UpperCamelCase , """num_attention_heads""" ) )
self.parent.assertTrue(hasattr(_UpperCamelCase , """num_encoder_blocks""" ) )
class lowercase :
def __init__( self , lowercase , lowercase=13 , lowercase=64 , lowercase=3 , lowercase=4 , lowercase=[2, 2, 2, 2] , lowercase=[8, 4, 2, 1] , lowercase=[16, 32, 64, 128] , lowercase=[1, 4, 8, 16] , lowercase=[1, 2, 4, 8] , lowercase=True , lowercase=True , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=3 , lowercase=None , ) -> List[Any]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = num_channels
lowerCAmelCase = num_encoder_blocks
lowerCAmelCase = sr_ratios
lowerCAmelCase = depths
lowerCAmelCase = hidden_sizes
lowerCAmelCase = downsampling_rates
lowerCAmelCase = num_attention_heads
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = scope
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self ) -> Any:
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _snake_case ( self , lowercase , lowercase , lowercase ) -> Optional[int]:
lowerCAmelCase = SegformerModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
lowerCAmelCase = model(_UpperCamelCase )
lowerCAmelCase = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def _snake_case ( self , lowercase , lowercase , lowercase ) -> int:
lowerCAmelCase = self.num_labels
lowerCAmelCase = SegformerForSemanticSegmentation(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
lowerCAmelCase = model(_UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
lowerCAmelCase = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def _snake_case ( self , lowercase , lowercase , lowercase ) -> List[Any]:
lowerCAmelCase = 1
lowerCAmelCase = SegformerForSemanticSegmentation(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
lowerCAmelCase = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_UpperCamelCase )
lowerCAmelCase = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertGreater(result.loss , 0.0 )
def _snake_case ( self ) -> Dict:
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase = config_and_inputs
lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{
'feature-extraction': SegformerModel,
'image-classification': SegformerForImageClassification,
'image-segmentation': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = SegformerModelTester(self )
lowerCAmelCase = SegformerConfigTester(self , config_class=_UpperCamelCase )
def _snake_case ( self ) -> str:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_UpperCamelCase )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_UpperCamelCase )
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def _snake_case ( self ) -> Optional[Any]:
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def _snake_case ( self ) -> Tuple:
pass
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_UpperCamelCase )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = True
for model_class in self.all_model_classes:
lowerCAmelCase = True
lowerCAmelCase = False
lowerCAmelCase = True
lowerCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
lowerCAmelCase = outputs.attentions
lowerCAmelCase = sum(self.model_tester.depths )
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase = True
lowerCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
lowerCAmelCase = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
# verify the first attentions (first block, first layer)
lowerCAmelCase = (self.model_tester.image_size // 4) ** 2
lowerCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
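# Intuition for the shapes above: SegFormer's efficient self-attention keeps queries at
# full resolution but subsamples keys/values by sr_ratio, so each attention map has
# shape (num_heads, seq_len, seq_len / sr_ratio**2). Hypothetical numbers for
# illustration only: with image_size=64 and sr_ratios[0]=8, seq_len = (64 // 4) ** 2 = 256
# and reduced_seq_len = (64 // (4 * 8)) ** 2 = 4.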
# verify the last attentions (last block, last layer)
lowerCAmelCase = (self.model_tester.image_size // 32) ** 2
lowerCAmelCase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
lowerCAmelCase = len(_UpperCamelCase )
# Check attention is always last and order is fine
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(_UpperCamelCase ) )
lowerCAmelCase = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
# verify the first attentions (first block, first layer)
lowerCAmelCase = (self.model_tester.image_size // 4) ** 2
lowerCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _snake_case ( self ) -> int:
def check_hidden_states_output(lowercase , lowercase , lowercase ):
lowerCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
lowerCAmelCase = outputs.hidden_states
lowerCAmelCase = self.model_tester.num_encoder_blocks
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def _snake_case ( self ) -> Union[str, Any]:
if not self.model_tester.is_training:
return
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCamelCase ):
continue
lowerCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
lowerCAmelCase = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
lowerCAmelCase = model(**_UpperCamelCase ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case ( self ) -> Optional[int]:
pass
@slow
def _snake_case ( self ) -> Dict:
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = SegformerModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class lowercase ( unittest.TestCase ):
@slow
def _snake_case ( self ) -> Any:
lowerCAmelCase = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_UpperCamelCase , align=_UpperCamelCase , do_random_crop=_UpperCamelCase )
lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
_UpperCamelCase )
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=_UpperCamelCase , return_tensors="""pt""" )
lowerCAmelCase = encoded_inputs.pixel_values.to(_UpperCamelCase )
with torch.no_grad():
lowerCAmelCase = model(_UpperCamelCase )
lowerCAmelCase = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
lowerCAmelCase = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_UpperCamelCase , align=_UpperCamelCase , do_random_crop=_UpperCamelCase )
lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained(
"""nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(_UpperCamelCase )
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=_UpperCamelCase , return_tensors="""pt""" )
lowerCAmelCase = encoded_inputs.pixel_values.to(_UpperCamelCase )
with torch.no_grad():
lowerCAmelCase = model(_UpperCamelCase )
lowerCAmelCase = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
lowerCAmelCase = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _UpperCamelCase , atol=1e-1 ) )
@slow
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_UpperCamelCase , align=_UpperCamelCase , do_random_crop=_UpperCamelCase )
lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
_UpperCamelCase )
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=_UpperCamelCase , return_tensors="""pt""" )
lowerCAmelCase = encoded_inputs.pixel_values.to(_UpperCamelCase )
with torch.no_grad():
lowerCAmelCase = model(_UpperCamelCase )
lowerCAmelCase = outputs.logits.detach().cpu()
lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=_UpperCamelCase , target_sizes=[(500, 300)] )
lowerCAmelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _UpperCamelCase )
lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=_UpperCamelCase )
lowerCAmelCase = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , _UpperCamelCase )
| 46 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class a__ :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=10 , _UpperCamelCase=3 , _UpperCamelCase=2 , _UpperCamelCase=2 , _UpperCamelCase=2 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=32 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=10 , _UpperCamelCase=0.0_2 , _UpperCamelCase=0.9 , _UpperCamelCase=None , ):
"""simple docstring"""
_lowercase : List[str] = parent
_lowercase : Tuple = batch_size
_lowercase : Tuple = image_size
_lowercase : Any = num_channels
_lowercase : Tuple = patch_size
_lowercase : Union[str, Any] = tubelet_size
_lowercase : str = num_frames
_lowercase : Any = is_training
_lowercase : Tuple = use_labels
_lowercase : List[Any] = hidden_size
_lowercase : int = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : int = intermediate_size
_lowercase : Optional[int] = hidden_act
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : Optional[Any] = type_sequence_label_size
_lowercase : Optional[Any] = initializer_range
_lowercase : int = mask_ratio
_lowercase : Union[str, Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
_lowercase : List[str] = (image_size // patch_size) ** 2
_lowercase : Optional[Any] = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
_lowercase : str = int(mask_ratio * self.seq_length )
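# Worked example with this tester's defaults (image_size=10, patch_size=2, num_frames=2,
# tubelet_size=2, mask_ratio=0.9): num_patches_per_frame = (10 // 2) ** 2 = 25,
# seq_length = (2 // 2) * 25 = 25, and num_masks = int(0.9 * 25) = 22.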
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_lowercase : Tuple = None
if self.use_labels:
_lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : Dict = VideoMAEModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowercase : Dict = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : Dict = VideoMAEForPreTraining(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_lowercase : Optional[int] = torch.ones((self.num_masks,) )
_lowercase : List[Any] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
_lowercase : Tuple = mask.expand(self.batch_size , -1 ).bool()
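# The resulting boolean mask is num_masks ones followed by zeros up to seq_length
# (22 ones then 3 zeros with the defaults), repeated for every example in the batch:
# a bool tensor of shape (batch_size, seq_length) = (13, 25).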
_lowercase : Tuple = model(_UpperCamelCase , _UpperCamelCase )
# model only returns predictions for masked patches
_lowercase : Tuple = mask.sum().item()
_lowercase : Optional[Any] = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
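# Each masked position is reconstructed as the raw pixels of one tubelet patch, hence
# decoder_num_labels = 3 (RGB) * tubelet_size * patch_size**2 values per patch; with the
# tester defaults that is 3 * 2 * 2**2 = 24, while mask.sum() recovers the 22 masked slots.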
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[Any] = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase : List[Any] = config_and_inputs
_lowercase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a__ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
_SCREAMING_SNAKE_CASE : List[Any] = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE : Tuple = False
_SCREAMING_SNAKE_CASE : Dict = False
_SCREAMING_SNAKE_CASE : Optional[int] = False
_SCREAMING_SNAKE_CASE : Optional[Any] = False
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : int = VideoMAEModelTester(self )
_lowercase : Union[str, Any] = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
"""simple docstring"""
_lowercase : Any = copy.deepcopy(_UpperCamelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_lowercase : Union[str, Any] = torch.ones((self.model_tester.num_masks,) )
_lowercase : Dict = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
_lowercase : List[str] = mask.expand(self.model_tester.batch_size , -1 ).bool()
_lowercase : Any = bool_masked_pos.to(_UpperCamelCase )
if return_labels:
if model_class in [
*get_values(_UpperCamelCase ),
]:
_lowercase : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCamelCase )
return inputs_dict
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds" )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : List[str] = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowercase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase , _lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : List[Any] = model_class(_UpperCamelCase )
_lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : str = [*signature.parameters.keys()]
_lowercase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCamelCase )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : int = VideoMAEModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
_lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Optional[int] = True
for model_class in self.all_model_classes:
_lowercase : List[str] = self.model_tester.seq_length - self.model_tester.num_masks
_lowercase : List[Any] = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
_lowercase : int = True
_lowercase : str = False
_lowercase : Any = True
_lowercase : Optional[Any] = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_lowercase : Union[str, Any] = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_lowercase : List[str] = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowercase : Tuple = True
_lowercase : Optional[Any] = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_lowercase : Dict = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_lowercase : Tuple = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_lowercase : str = len(_UpperCamelCase )
# Check attention is always last and order is fine
_lowercase : List[Any] = True
_lowercase : List[str] = True
_lowercase : Any = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_lowercase : Optional[int] = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(_UpperCamelCase ) )
_lowercase : Optional[int] = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _lowerCamelCase ( self ):
"""simple docstring"""
def check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_lowercase : Optional[int] = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_lowercase : Tuple = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_lowercase : List[str] = outputs.hidden_states
_lowercase : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
_lowercase : List[str] = self.model_tester.seq_length - self.model_tester.num_masks
_lowercase : Optional[Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_lowercase , _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Dict = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase : List[str] = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _A ( ) -> Any:
_lowercase : Tuple = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
_lowercase : int = np.load(snake_case )
return list(snake_case )
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ):
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Tuple = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
_UpperCamelCase )
_lowercase : Dict = self.default_image_processor
_lowercase : Optional[Any] = prepare_video()
_lowercase : Union[str, Any] = image_processor(_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
_lowercase : str = model(**_UpperCamelCase )
# verify the logits
_lowercase : List[Any] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
_lowercase : int = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(_UpperCamelCase )
_lowercase : Dict = self.default_image_processor
_lowercase : Optional[Any] = prepare_video()
_lowercase : List[Any] = image_processor(_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# add boolean mask, indicating which patches to mask
_lowercase : int = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
_lowercase : Any = torch.load(_UpperCamelCase )
# forward pass
with torch.no_grad():
_lowercase : List[Any] = model(**_UpperCamelCase )
# verify the logits
_lowercase : Dict = torch.Size([1, 1408, 1536] )
_lowercase : Tuple = torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=_UpperCamelCase )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCamelCase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
_lowercase : Tuple = torch.tensor([0.5_1_4_2] , device=_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.loss , _UpperCamelCase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
_lowercase : Dict = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=_UpperCamelCase ).to(
_UpperCamelCase )
with torch.no_grad():
_lowercase : Optional[int] = model(**_UpperCamelCase )
_lowercase : List[str] = torch.tensor([0.6_4_6_9] , device=_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.loss , _UpperCamelCase , atol=1E-4 ) )
| 250 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node:
    """Build the small sample tree used by main() (wiring assumed from the node values)."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    """Root, then left subtree, then right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Left subtree, then right subtree, then root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """Left subtree, then root, then right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Number of nodes on the longest root-to-leaf path."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0
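# Expected results for the sample tree above (1 at the root, 2 and 3 as its children,
# 4 and 5 under 2):
#   preorder(tree)  -> [1, 2, 4, 5, 3]
#   inorder(tree)   -> [4, 2, 5, 1, 3]
#   postorder(tree) -> [4, 5, 2, 3, 1]
#   height(tree)    -> 3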
def level_order(root: Node | None) -> list[int]:
    """Breadth-first traversal using a FIFO queue."""
    output: list[int] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> list[int]:
    """Collect the data of every node on the given level, left to right."""
    output: list[int] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> list[int]:
    """Collect the data of every node on the given level, right to left."""
    output: list[int] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> list[list[int]]:
    """Level-by-level traversal that alternates direction on each level."""
    if root is None:
        return []

    output: list[list[int]] = []
    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    """Run every traversal on the sample tree and print the results."""
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")

    print(f"Height of Tree: {height(tree)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(tree))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 356 |
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Project Euler 16: return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
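# Worked example: solution(15) computes 2**15 = 32768 and returns
# 3 + 2 + 7 + 6 + 8 = 26.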
if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
| 205 | 0 |
import math
def jump_search(arr: list, x: int) -> int:
    """Search the sorted list arr for x by jumping in sqrt(n)-sized blocks."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # Jump ahead block by block until the block that could contain x is found.
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    # Linear scan inside the candidate block.
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
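# Hand-traced example: jump_search([-5, -1, 0, 3, 6, 9, 12], 9) jumps prev to 2, then
# to 4, scans linearly to index 5 where arr[5] == 9, and returns 5.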
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
    if res == -1:
        print('Number not found!')
    else:
        print(f'Number {x} is at index {res}')
| 29 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    """Three-way partition of a list that contains only the values in `colors`."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)

    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence
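# Example: dutch_national_flag_sort([2, 0, 1, 0, 2, 1]) partitions the values in a
# single pass and returns [0, 0, 1, 1, 2, 2].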
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by commas:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(f'{dutch_national_flag_sort(unsorted)}')
| 130 | 0 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCAmelCase = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def __lowerCamelCase ( lowerCAmelCase_ ) -> str:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
_a : List[str] = list(s_dict.keys() )
for key in keys:
_a : List[Any] = r'.*/layers_(\d+)'
_a : Union[str, Any] = key
if re.match(lowerCAmelCase_ , lowerCAmelCase_ ):
_a : int = re.sub(r'layers_(\d+)' , r'block/\1/layer' , lowerCAmelCase_ )
_a : Dict = r'(encoder|decoder)\/'
if re.match(lowerCAmelCase_ , lowerCAmelCase_ ):
_a : Union[str, Any] = re.match(lowerCAmelCase_ , lowerCAmelCase_ ).groups()
if groups[0] == "encoder":
_a : Dict = re.sub(r'/mlp/' , r'/1/mlp/' , lowerCAmelCase_ )
_a : Union[str, Any] = re.sub(r'/pre_mlp_layer_norm/' , r'/1/layer_norm/' , lowerCAmelCase_ )
elif groups[0] == "decoder":
_a : Dict = re.sub(r'/mlp/' , r'/2/mlp/' , lowerCAmelCase_ )
_a : Dict = re.sub(r'/pre_mlp_layer_norm/' , r'/2/layer_norm/' , lowerCAmelCase_ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
_a : int = new_key.replace(lowerCAmelCase_ , lowerCAmelCase_ )
print(f"""{key} -> {new_key}""" )
_a : Optional[int] = s_dict.pop(lowerCAmelCase_ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_a : int = s_dict[
'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_a : Optional[int] = s_dict[
'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                # Assumption: each expert slice is renamed to experts/expert_{idx}/; the
                # literal 'nested fstring' placeholder in the source was a preprocessing
                # artifact standing in for a nested f-string.
                new_expert_key = key.replace("expert/", f"experts/expert_{idx}/")
                s_dict[new_expert_key] = expert_weights[idx]
                print(f"{key} -> {new_expert_key}")
            s_dict.pop(key)
return s_dict
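# Example trace (hand-derived from the rules above): the T5X key
# 'encoder/layers_0/attention/key/kernel' is first rewritten to
# 'encoder/block/0/layer/attention/key/kernel', and the mapping table then yields
# 'encoder/block/0/layer/0/SelfAttention/k/kernel'.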
__lowerCAmelCase = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
# Convert a google style config to the hugging face fromat
import regex as re
with open(lowerCAmelCase_ , 'r' ) as f:
_a : List[str] = f.read()
_a : Union[str, Any] = re.findall(r'(.*) = ([0-9.]*)' , lowerCAmelCase_ )
_a : Tuple = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
_a : Union[str, Any] = float(lowerCAmelCase_ ) if '.' in value else int(lowerCAmelCase_ )
_a : Any = re.findall(r'(.*activations) = \(\'(.*)\',\)' , lowerCAmelCase_ )[0]
_a : Tuple = str(activation[1] )
_a : Optional[Any] = num_experts
_a : List[Any] = SwitchTransformersConfig(**lowerCAmelCase_ )
return config
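# Example: a gin line such as `NUM_HEADS = 12` is matched by the findall above and,
# through GIN_TO_CONFIG_MAPPING, ends up as num_heads=12 in the kwargs passed to
# SwitchTransformersConfig.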
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_="./" , lowerCAmelCase_=8 ) -> Dict:
# Initialise PyTorch model
print(f"""Loading flax weights from : {flax_checkpoint_path}""" )
_a : Union[str, Any] = checkpoints.load_tax_checkpoint(lowerCAmelCase_ )
if gin_file is not None:
_a : int = convert_gin_to_config(lowerCAmelCase_ , lowerCAmelCase_ )
else:
_a : Dict = SwitchTransformersConfig.from_pretrained(lowerCAmelCase_ )
_a : Optional[int] = SwitchTransformersForConditionalGeneration(lowerCAmelCase_ )
_a : Union[str, Any] = flax_params['target']
_a : Optional[Any] = flatten_dict(lowerCAmelCase_ , sep='/' )
_a : List[Any] = rename_keys(lowerCAmelCase_ )
_a : Dict = unflatten_dict(lowerCAmelCase_ , sep='/' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowerCAmelCase_ , lowerCAmelCase_ )
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCAmelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 366 |
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a sorted-letter signature; anagrams share the same signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the word list with the same signature as my_word."""
    return word_by_signature[signature(my_word)]
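# For example, signature('listen') == signature('silent') == 'eilnst', so the two
# words land in the same word_by_signature bucket and are reported as anagrams.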
data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}

    with open('anagrams.txt', 'w') as file:
        file.write('all_anagrams = \n ')
        file.write(pprint.pformat(all_anagrams))
| 107 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Dict = logging.get_logger(__name__)
__snake_case :Union[str, Any] = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[int] = '''unispeech'''
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : Union[str, Any]=768 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Optional[int]=3_072 , __SCREAMING_SNAKE_CASE : int="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=1E-5 , __SCREAMING_SNAKE_CASE : Optional[Any]="group" , __SCREAMING_SNAKE_CASE : List[str]="gelu" , __SCREAMING_SNAKE_CASE : List[Any]=(512, 512, 512, 512, 512, 512, 512) , __SCREAMING_SNAKE_CASE : List[Any]=(5, 2, 2, 2, 2, 2, 2) , __SCREAMING_SNAKE_CASE : Union[str, Any]=(10, 3, 3, 3, 3, 2, 2) , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=128 , __SCREAMING_SNAKE_CASE : List[Any]=16 , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : int=0.05 , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Optional[Any]=10 , __SCREAMING_SNAKE_CASE : str=0 , __SCREAMING_SNAKE_CASE : Any=320 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=100 , __SCREAMING_SNAKE_CASE : List[str]=256 , __SCREAMING_SNAKE_CASE : Tuple=256 , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : str="mean" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Tuple=256 , __SCREAMING_SNAKE_CASE : Tuple=80 , __SCREAMING_SNAKE_CASE : Optional[Any]=0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1 , __SCREAMING_SNAKE_CASE : Dict=2 , __SCREAMING_SNAKE_CASE : str=0.5 , **__SCREAMING_SNAKE_CASE : str , ):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE)
__a = hidden_size
__a = feat_extract_norm
__a = feat_extract_activation
__a = list(__SCREAMING_SNAKE_CASE)
__a = list(__SCREAMING_SNAKE_CASE)
__a = list(__SCREAMING_SNAKE_CASE)
__a = conv_bias
__a = num_conv_pos_embeddings
__a = num_conv_pos_embedding_groups
__a = len(self.conv_dim)
__a = num_hidden_layers
__a = intermediate_size
__a = hidden_act
__a = num_attention_heads
__a = hidden_dropout
__a = attention_dropout
__a = activation_dropout
__a = feat_proj_dropout
__a = final_dropout
__a = layerdrop
__a = layer_norm_eps
__a = initializer_range
__a = num_ctc_classes
__a = vocab_size
__a = do_stable_layer_norm
__a = use_weighted_layer_sum
__a = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__a = apply_spec_augment
__a = mask_time_prob
__a = mask_time_length
__a = mask_time_min_masks
__a = mask_feature_prob
__a = mask_feature_length
__a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__a = num_codevectors_per_group
__a = num_codevector_groups
__a = contrastive_logits_temperature
__a = feat_quantizer_dropout
__a = num_negatives
__a = codevector_dim
__a = proj_codevector_dim
__a = diversity_loss_weight
# ctc loss
__a = ctc_loss_reduction
__a = ctc_zero_infinity
# pretraining loss
__a = replace_prob
@property
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
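# With the default conv_stride of (5, 2, 2, 2, 2, 2, 2) this product is 5 * 2**6 = 320,
# i.e. one encoder frame per 320 raw samples (20 ms of 16 kHz audio).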
| 49 |
import logging
from transformers.configuration_utils import PretrainedConfig
__snake_case :Any = logging.getLogger(__name__)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[Any] = '''masked_bert'''
def __init__( self : str , __SCREAMING_SNAKE_CASE : int=30_522 , __SCREAMING_SNAKE_CASE : str=768 , __SCREAMING_SNAKE_CASE : List[str]=12 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3_072 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=1E-12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 , __SCREAMING_SNAKE_CASE : List[str]="topK" , __SCREAMING_SNAKE_CASE : List[Any]="constant" , __SCREAMING_SNAKE_CASE : int=0.0 , **__SCREAMING_SNAKE_CASE : List[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = pruning_method
__a = mask_init
__a = mask_scale
| 49 | 1 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Primality test that only trial-divides by 2, 3 and numbers of the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
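# Example: is_prime(97) only needs i = 5 (since sqrt(97) ~ 9.85); 97 % 5 != 0 and
# 97 % 7 != 0, so it returns True.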
def prime_generator() -> Iterator[int]:
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Project Euler 10: return the sum of all primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
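# Sanity check from the Project Euler statement: the primes below 10 are 2, 3, 5 and 7,
# so solution(10) == 17.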
if __name__ == "__main__":
    print(f"{solution() = }")
| 369 |
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
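# Worked example: euclidean_gcd(48, 18) iterates (48, 18) -> (18, 12) -> (12, 6)
# -> (6, 0) and returns 6; the recursive variant follows the same chain.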
def main():
    """Print a few sample GCD computations."""
    print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}')
    print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}')
    print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}')
    print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}')
    print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}')
    print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}')
    print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}')
    print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}')
    print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}')
    print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}')
if __name__ == "__main__":
    main()
| 92 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ =logging.get_logger(__name__)
lowercase__ ={
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : str = "dpr"
def __init__(self : int , snake_case_ : Optional[Any]=3_0_5_2_2 , snake_case_ : List[Any]=7_6_8 , snake_case_ : Dict=1_2 , snake_case_ : int=1_2 , snake_case_ : Union[str, Any]=3_0_7_2 , snake_case_ : Any="gelu" , snake_case_ : List[Any]=0.1 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : int=5_1_2 , snake_case_ : Tuple=2 , snake_case_ : Union[str, Any]=0.02 , snake_case_ : Any=1E-12 , snake_case_ : str=0 , snake_case_ : Tuple="absolute" , snake_case_ : int = 0 , **snake_case_ : Optional[int] , ):
super().__init__(pad_token_id=snake_case_ , **snake_case_ )
__a : int = vocab_size
__a : int = hidden_size
__a : Optional[int] = num_hidden_layers
__a : List[Any] = num_attention_heads
__a : Tuple = hidden_act
__a : List[Any] = intermediate_size
__a : Optional[int] = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : List[str] = max_position_embeddings
__a : Any = type_vocab_size
__a : str = initializer_range
__a : Dict = layer_norm_eps
__a : Any = projection_dim
__a : Any = position_embedding_type
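# Note: projection_dim defaults to 0, which DPR interprets as "no extra projection
# layer" on top of the encoder's pooled output.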
| 216 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
lowercase__ =getLogger(__name__)
def __UpperCamelCase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : int = 8 , lowerCAmelCase__ : int = 1_0_2_4 , lowerCAmelCase__ : Union[str, Any]="val" , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : Union[str, Any]="summarization" , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Optional[int]=1 , lowerCAmelCase__ : Dict = None , lowerCAmelCase__ : int="" , **lowerCAmelCase__ : int , ):
__a : List[Any] = str(lowerCAmelCase__ )
assert local_rank is not None
torch.distributed.init_process_group(backend='''nccl''' , rank=lowerCAmelCase__ )
__a : Tuple = Path(lowerCAmelCase__ )
__a : Dict = save_dir.joinpath(f"rank_{local_rank}_output.json" )
torch.cuda.set_device(lowerCAmelCase__ )
__a : Dict = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ ).cuda()
if fpaa:
__a : str = model.half()
# determine if we need to increase num_beams
use_task_specific_params(lowerCAmelCase__ , lowerCAmelCase__ ) # update config with task specific params
__a : List[str] = generate_kwargs.pop('''num_beams''' , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
__a : Dict = num_return_sequences
__a : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
logger.info(f"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
__a : Dict = tokenizer.model_max_length
if prefix is None:
__a : Dict = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
__a : List[Any] = SeqaSeqDataset(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , max_target_length=1_0_2_4 , type_path=lowerCAmelCase__ , n_obs=lowerCAmelCase__ , prefix=lowerCAmelCase__ , **lowerCAmelCase__ , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
__a : Tuple = ds.make_sortish_sampler(lowerCAmelCase__ , distributed=lowerCAmelCase__ , add_extra_examples=lowerCAmelCase__ , shuffle=lowerCAmelCase__ )
__a : List[Any] = DataLoader(lowerCAmelCase__ , sampler=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , collate_fn=ds.collate_fn )
__a : List[Any] = []
for batch in tqdm(lowerCAmelCase__ ):
__a : Any = model.generate(
input_ids=batch['''input_ids'''].to(model.device ) , attention_mask=batch['''attention_mask'''].to(model.device ) , num_return_sequences=lowerCAmelCase__ , num_beams=lowerCAmelCase__ , **lowerCAmelCase__ , )
__a : List[Any] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ )
__a : int = batch['''ids''']
if num_return_sequences > 1:
__a : List[str] = chunks(lowerCAmelCase__ , lowerCAmelCase__ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(lowerCAmelCase__ ):
results.append({'''pred''': pred, '''id''': ids[i].item()} )
save_json(lowerCAmelCase__ , lowerCAmelCase__ )
return results, sampler.num_replicas
def __UpperCamelCase ( ):
__a : str = argparse.ArgumentParser(
epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''' )
parser.add_argument('''--data_dir''' , type=lowerCAmelCase__ , help='''like cnn_dm/test.source''' )
parser.add_argument(
'''--model_name''' , type=lowerCAmelCase__ , help='''like facebook/bart-large-cnn,t5-base, etc.''' , default='''sshleifer/distilbart-xsum-12-3''' , )
parser.add_argument('''--save_dir''' , type=lowerCAmelCase__ , help='''where to save''' , default='''tmp_gen''' )
parser.add_argument('''--max_source_length''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ )
parser.add_argument(
'''--type_path''' , type=lowerCAmelCase__ , default='''test''' , help='''which subset to evaluate typically train/val/test''' )
parser.add_argument('''--task''' , type=lowerCAmelCase__ , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=lowerCAmelCase__ , default=8 , required=lowerCAmelCase__ , help='''batch size''' )
parser.add_argument(
'''--local_rank''' , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help='''should be passed by distributed.launch''' )
parser.add_argument(
'''--n_obs''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''How many observations. Defaults to all.''' )
parser.add_argument(
'''--num_return_sequences''' , type=lowerCAmelCase__ , default=1 , required=lowerCAmelCase__ , help='''How many sequences to return''' )
parser.add_argument(
'''--sync_timeout''' , type=lowerCAmelCase__ , default=6_0_0 , required=lowerCAmelCase__ , help='''How long should master process wait for other processes to finish.''' , )
parser.add_argument('''--src_lang''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ )
parser.add_argument('''--tgt_lang''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ )
parser.add_argument(
'''--prefix''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''will be added to the begininng of src examples''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--debug''' , action='''store_true''' )
__a : int = time.time()
__a , __a : Tuple = parser.parse_known_args()
__a : Optional[int] = parse_numeric_n_bool_cl_kwargs(lowerCAmelCase__ )
if generate_kwargs and args.local_rank <= 0:
print(f"parsed the following generate kwargs: {generate_kwargs}" )
__a : Union[str, Any] = Path(args.save_dir + '''_tmp''' )
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ ) # this handles locking.
__a : Dict = list(json_save_dir.glob('''rank_*.json''' ) )
if intermediate_files:
raise ValueError(f"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
__a : Optional[Any] = {}
if args.src_lang is not None:
__a : int = args.src_lang
if args.tgt_lang is not None:
__a : Optional[Any] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=lowerCAmelCase__ )
__a , __a : Tuple = eval_data_dir(
args.data_dir , lowerCAmelCase__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=lowerCAmelCase__ , **lowerCAmelCase__ , )
if args.local_rank <= 0:
__a : int = Path(args.save_dir )
save_dir.mkdir(exist_ok=lowerCAmelCase__ )
__a : List[str] = gather_results_from_each_node(lowerCAmelCase__ , lowerCAmelCase__ , args.sync_timeout )
__a : int = combine_partial_results(lowerCAmelCase__ )
if args.num_return_sequences > 1:
__a : List[Any] = save_dir.joinpath('''pseudolabel_results.json''' )
print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(lowerCAmelCase__ , lowerCAmelCase__ )
return
__a : Any = Path(args.data_dir ).joinpath(args.type_path + '''.target''' )
with open(lowerCAmelCase__ ) as f:
__a : Optional[int] = [x.rstrip() for x in f.readlines()][: len(lowerCAmelCase__ )]
# Calculate metrics, save metrics, and save _generations.txt
__a : str = '''translation''' in args.task
__a : List[str] = calculate_bleu if calc_bleu else calculate_rouge
__a : Any = '''bleu''' if calc_bleu else '''rouge'''
__a : Dict = score_fn(lowerCAmelCase__ , lowerCAmelCase__ )
__a : Dict = len(lowerCAmelCase__ )
__a : str = time.time() - start_time
__a : List[str] = round(runtime / metrics['''n_obs'''] , 4 )
__a : Tuple = num_replicas
# TODO(@stas00): add whatever metadata to metrics
__a : Optional[int] = save_dir.joinpath(f"{args.type_path}_{metric_name}.json" )
save_json(lowerCAmelCase__ , lowerCAmelCase__ , indent=lowerCAmelCase__ )
print(lowerCAmelCase__ )
write_txt_file(lowerCAmelCase__ , save_dir.joinpath(f"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(lowerCAmelCase__ , save_dir.joinpath(f"{args.type_path}.target" ) )
else:
shutil.rmtree(lowerCAmelCase__ )
def __UpperCamelCase ( lowerCAmelCase__ : Optional[int] ):
__a : Optional[int] = []
for partial_result in partial_results:
records.extend(lowerCAmelCase__ )
__a : Tuple = sorted(lowerCAmelCase__ , key=lambda x: x["id"] )
__a : Tuple = [x['''pred'''] for x in records]
return preds
def __UpperCamelCase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict ):
# WAIT FOR lots of .json files
__a : Tuple = time.time()
logger.info('''waiting for all nodes to finish''' )
__a : Optional[int] = None
while (time.time() - start_wait) < timeout:
__a : Optional[int] = list(save_dir.glob('''rank_*.json''' ) )
if len(lowerCAmelCase__ ) < num_replicas:
continue
try:
# make sure all json files are fully saved
__a : Tuple = lmap(lowerCAmelCase__ , lowerCAmelCase__ )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError('''Rank 0 gave up on waiting for other processes''' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
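    # Illustrative launch command (model name, paths and GPU count are placeholders):
    #   python -m torch.distributed.launch --nproc_per_node=<gpus> run_distributed_eval.py \
    #     --model_name <hf-model> --data_dir <dir with test.source> --save_dir <out_dir>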
run_generate()
| 216 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
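# Illustrative note (assumes the standard `transformers` package layout): with the
# lazy structure above, `import transformers` stays cheap, because a line such as
#
#     from transformers import CLIPModel
#
# only triggers the heavy `modeling_clip` import when the attribute is first accessed.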
| 241 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
lowerCamelCase = """"""
lowerCamelCase = """"""
lowerCamelCase = """"""
lowerCamelCase = 1 # (0 is vertical, 1 is horizontal)
def a__ ( ):
UpperCAmelCase_ , UpperCAmelCase_ = get_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
print("Processing..." )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = update_image_and_anno(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
for index, image in enumerate(lowerCAmelCase__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
UpperCAmelCase_ = random_chars(32 )
UpperCAmelCase_ = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0]
UpperCAmelCase_ = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , lowerCAmelCase__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(lowerCAmelCase__ )} with {file_name}""" )
UpperCAmelCase_ = []
for anno in new_annos[index]:
UpperCAmelCase_ = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(lowerCAmelCase__ )
with open(f"""/{file_root}.txt""" , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
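# Worked example of the flip arithmetic above (YOLO format: [class, x_center,
# y_center, width, height], all normalised to [0, 1]). For flip_type == 1 a box
# [0, 0.25, 0.5, 0.2, 0.3] becomes [0, 0.75, 0.5, 0.2, 0.3]: only the x centre
# mirrors (1 - 0.25 = 0.75); y, width and height are unchanged.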
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 241 | 1 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Generate a password that is guaranteed to contain `chars_incl`."""
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char


# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, you had better save it.]")


if __name__ == "__main__":
    main()
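# Quick sanity check of the strength rules above (illustrative values):
# is_strong_password("Aa1!aaaa") is True (it has upper, lower, digit and symbol),
# while is_strong_password("password") is False (no uppercase, digit or symbol).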
| 2 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 205 | 0 |
'''simple docstring'''


def nand_gate(input_1: int, input_2: int) -> int:
    """Calculate NAND of the two inputs: the output is 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
| 16 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)
        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
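        # Note: `return_all_scores` is the legacy flag exercised in the tests above;
        # passing `top_k=None` is the current way to request a score for every label.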
| 16 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
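# Hypothetical usage sketch (not part of the original module; assumes a local image
# file and that the checkpoint declared above can be downloaded):
#
#     from PIL import Image
#     tool = ImageQuestionAnsweringTool()
#     print(tool(Image.open("cats.png"), "How many cats are there?"))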
| 283 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
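# A minimal usage sketch (assumption: this formatter is the backend behind
# `Dataset.set_format("torch")` in the `datasets` library; the snippet below is
# illustrative, not part of the original module):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]})
#     ds.set_format("torch")
#     ds[0]["x"]  # -> tensor([1, 2])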
| 107 | 0 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( ):
__UpperCamelCase =10
__UpperCamelCase =datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
__UpperCamelCase =datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(SCREAMING_SNAKE_CASE__ ) ),
} , features=SCREAMING_SNAKE_CASE__ , )
return dataset
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
__UpperCamelCase =str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=SCREAMING_SNAKE_CASE__ )
return filename
# FILE_CONTENT + files
FILE_CONTENT = """\
    Text data.
    Second line of data."""
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'file.txt'
__UpperCamelCase =FILE_CONTENT
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return filename
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple ):
    import bz2
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
__UpperCamelCase =bytes(SCREAMING_SNAKE_CASE__ , 'utf-8' )
    with bz2.open(SCREAMING_SNAKE_CASE__ , 'wb' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any ):
import gzip
__UpperCamelCase =str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
__UpperCamelCase =bytes(SCREAMING_SNAKE_CASE__ , 'utf-8' )
with gzip.open(SCREAMING_SNAKE_CASE__ , 'wb' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
__UpperCamelCase =bytes(SCREAMING_SNAKE_CASE__ , 'utf-8' )
        with lz4.frame.open(SCREAMING_SNAKE_CASE__ , 'wb' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(SCREAMING_SNAKE_CASE__ , 'w' ) as archive:
archive.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.basename(SCREAMING_SNAKE_CASE__ ) )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
import tarfile
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
f.add(SCREAMING_SNAKE_CASE__ , arcname=os.path.basename(SCREAMING_SNAKE_CASE__ ) )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
import lzma
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
__UpperCamelCase =bytes(SCREAMING_SNAKE_CASE__ , 'utf-8' )
with lzma.open(SCREAMING_SNAKE_CASE__ , 'wb' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
import zipfile
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.basename(SCREAMING_SNAKE_CASE__ ) )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
__UpperCamelCase =bytes(SCREAMING_SNAKE_CASE__ , 'utf-8' )
with zstd.open(SCREAMING_SNAKE_CASE__ , 'wb' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'file.xml'
__UpperCamelCase =textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
__UpperCamelCase =datasets.Dataset.from_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=SCREAMING_SNAKE_CASE__ )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(SCREAMING_SNAKE_CASE__ ) ) as con:
__UpperCamelCase =con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(SCREAMING_SNAKE_CASE__ , 'w' , newline='' ) as f:
__UpperCamelCase =csv.DictWriter(SCREAMING_SNAKE_CASE__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(SCREAMING_SNAKE_CASE__ )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(SCREAMING_SNAKE_CASE__ , 'w' , newline='' ) as f:
__UpperCamelCase =csv.DictWriter(SCREAMING_SNAKE_CASE__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(SCREAMING_SNAKE_CASE__ )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
    import bz2
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f:
__UpperCamelCase =f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(SCREAMING_SNAKE_CASE__ , 'wb' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.basename(SCREAMING_SNAKE_CASE__ ) )
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.basename(SCREAMING_SNAKE_CASE__ ) )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ):
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.join('main_dir' , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) )
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.join('main_dir' , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] ):
__UpperCamelCase =str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
__UpperCamelCase =pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as f:
__UpperCamelCase =pq.ParquetWriter(SCREAMING_SNAKE_CASE__ , schema=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(SCREAMING_SNAKE_CASE__ ) )] for k in DATA[0]} , schema=SCREAMING_SNAKE_CASE__ )
writer.write_table(SCREAMING_SNAKE_CASE__ )
writer.close()
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
__UpperCamelCase =str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__UpperCamelCase ={'data': DATA}
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__UpperCamelCase ={'data': DATA_DICT_OF_LISTS}
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any ):
__UpperCamelCase =str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] ):
__UpperCamelCase =str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ):
import gzip
__UpperCamelCase =str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(SCREAMING_SNAKE_CASE__ , 'rb' ) as orig_file:
with gzip.open(SCREAMING_SNAKE_CASE__ , 'wb' ) as zipped_file:
zipped_file.writelines(SCREAMING_SNAKE_CASE__ )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
import gzip
__UpperCamelCase =str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(SCREAMING_SNAKE_CASE__ , 'rb' ) as orig_file:
with gzip.open(SCREAMING_SNAKE_CASE__ , 'wb' ) as zipped_file:
zipped_file.writelines(SCREAMING_SNAKE_CASE__ )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ):
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.basename(SCREAMING_SNAKE_CASE__ ) )
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.basename(SCREAMING_SNAKE_CASE__ ) )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.join('nested' , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.join('main_dir' , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) )
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.join('main_dir' , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
f.add(SCREAMING_SNAKE_CASE__ , arcname=os.path.basename(SCREAMING_SNAKE_CASE__ ) )
f.add(SCREAMING_SNAKE_CASE__ , arcname=os.path.basename(SCREAMING_SNAKE_CASE__ ) )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple ):
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
f.add(SCREAMING_SNAKE_CASE__ , arcname=os.path.join('nested' , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
__UpperCamelCase =['0', '1', '2', '3']
__UpperCamelCase =str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =['0', '1', '2', '3']
__UpperCamelCase =str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple ):
__UpperCamelCase =['0', '1', '2', '3']
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.basename(SCREAMING_SNAKE_CASE__ ) )
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.basename(SCREAMING_SNAKE_CASE__ ) )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.join('main_dir' , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) )
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.join('main_dir' , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.basename('unsupported.ext' ) )
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase ='\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
__UpperCamelCase =str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(SCREAMING_SNAKE_CASE__ , 'w' , encoding='utf-8' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( ):
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( ):
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.basename(SCREAMING_SNAKE_CASE__ ) )
f.write(SCREAMING_SNAKE_CASE__ , arcname=os.path.basename(SCREAMING_SNAKE_CASE__ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple ):
    data_dir = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
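# Hypothetical example of consuming one of the session fixtures above (the fixture
# name `csv_path` is assumed from the upstream conftest; the obfuscated fixture
# names in this file would need to be restored for it to resolve):
#
#     def test_csv_has_header(csv_path):
#         with open(csv_path, newline="") as f:
#             assert f.readline().strip() == "col_1,col_2,col_3"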
| 117 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
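# The exclude-first recursion above prints subsequences in this order for
# seq = ["A", "B", "C"]:
# [], ['C'], ['B'], ['B', 'C'], ['A'], ['A', 'C'], ['A', 'B'], ['A', 'B', 'C']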
| 117 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = BlenderbotSmallTokenizer
SCREAMING_SNAKE_CASE__ : List[Any] = False
    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text
    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 61 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class a__ ( snake_case__ , unittest.TestCase ):
_a : Optional[Any] = DebertaVaTokenizer
_a : Optional[Any] = DebertaVaTokenizerFast
_a : List[str] = True
_a : Optional[Any] = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = " \tHeLLo!how \n Are yoU? "
__lowerCAmelCase = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = " \tHeLLo!how \n Are yoU? "
__lowerCAmelCase = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(_A )
__lowerCAmelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCAmelCase = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCAmelCase,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
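A quick hedged demo of what these tests assert, outside the harness. This is a sketch, not part of the test file: it downloads the real checkpoint from the Hub, and it assumes the slow and fast tokenizers agree on this sentence, which is exactly what the parity tests above check.

```python
# Sketch: slow (SentencePiece) and fast (Rust) DeBERTa-v2 tokenizers
# should produce the same tokens for the same text.
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast

tok = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
tok_fast = DebertaV2TokenizerFast.from_pretrained("microsoft/deberta-v2-xlarge")

text = "I was born in 92000, and this is falsé."
assert tok.tokenize(text) == tok_fast.tokenize(text)
print(tok.tokenize(text))
```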
| 92 | 0 |
"""simple docstring"""
import math
class SCREAMING_SNAKE_CASE__ :
def __init__( self , _SCREAMING_SNAKE_CASE=0 ) -> int: # a graph with Node 0,1,...,N-1
'''simple docstring'''
UpperCAmelCase : Tuple = n
UpperCAmelCase : int = [
[math.inf for j in range(0 , _a )] for i in range(0 , _a )
] # adjacency matrix for weight
UpperCAmelCase : Optional[int] = [
[math.inf for j in range(0 , _a )] for i in range(0 , _a )
] # dp[i][j] stores minimum distance from i to j
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : int = w
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
UpperCAmelCase : Any = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
return self.dp[u][v]
if __name__ == "__main__":
A: Optional[Any] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
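A tiny sanity check on this `Graph` (a sketch; note that unreachable pairs stay at `math.inf`, since the distance matrix is never seeded with zero self-distances):

```python
g = Graph(2)
g.add_edge(0, 1, 4)
g.floyd_warshall()
assert g.show_min(0, 1) == 4
assert g.show_min(1, 0) == math.inf  # no reverse edge, so the distance stays infinite
```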
| 361 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class SCREAMING_SNAKE_CASE__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=sys.maxsize ) -> int:
'''simple docstring'''
UpperCAmelCase : List[Any] = """bilinear"""
UpperCAmelCase : Tuple = max_size
UpperCAmelCase : Optional[Any] = short_edge_length
def __call__( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : List[str] = []
for img in imgs:
UpperCAmelCase , UpperCAmelCase : List[Any] = img.shape[:2]
# later: provide list and randomly choose index for resize
UpperCAmelCase : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
UpperCAmelCase : List[str] = size * 1.0 / min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if h < w:
UpperCAmelCase , UpperCAmelCase : Dict = size, scale * w
else:
UpperCAmelCase , UpperCAmelCase : Optional[int] = scale * h, size
if max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) > self.max_size:
UpperCAmelCase : List[str] = self.max_size * 1.0 / max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = newh * scale
UpperCAmelCase : int = neww * scale
UpperCAmelCase : Tuple = int(neww + 0.5 )
UpperCAmelCase : Optional[int] = int(newh + 0.5 )
if img.dtype == np.uinta:
UpperCAmelCase : str = Image.fromarray(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
UpperCAmelCase : Optional[int] = np.asarray(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
UpperCAmelCase : Union[str, Any] = nn.functional.interpolate(
_SCREAMING_SNAKE_CASE , (newh, neww) , mode=self.interp_method , align_corners=_SCREAMING_SNAKE_CASE ).squeeze(0 )
img_augs.append(_SCREAMING_SNAKE_CASE )
return img_augs
class SCREAMING_SNAKE_CASE__ :
def __init__( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
UpperCAmelCase : List[Any] = cfg.INPUT.FORMAT
UpperCAmelCase : Dict = cfg.SIZE_DIVISIBILITY
UpperCAmelCase : Dict = cfg.PAD_VALUE
UpperCAmelCase : Tuple = cfg.INPUT.MAX_SIZE_TEST
UpperCAmelCase : Any = cfg.MODEL.DEVICE
UpperCAmelCase : List[Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
UpperCAmelCase : List[str] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
UpperCAmelCase : Optional[Any] = lambda _SCREAMING_SNAKE_CASE : (x - self.pixel_mean) / self.pixel_std
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : List[Any] = tuple(max(_SCREAMING_SNAKE_CASE ) for s in zip(*[img.shape for img in images] ) )
UpperCAmelCase : List[Any] = [im.shape[-2:] for im in images]
UpperCAmelCase : Any = [
nn.functional.pad(
_SCREAMING_SNAKE_CASE , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
]
return torch.stack(_SCREAMING_SNAKE_CASE ), torch.tensor(_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
'''simple docstring'''
with torch.no_grad():
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase : int = [images]
if single_image:
assert len(_SCREAMING_SNAKE_CASE ) == 1
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(_SCREAMING_SNAKE_CASE , images.pop(_SCREAMING_SNAKE_CASE ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
_SCREAMING_SNAKE_CASE , torch.as_tensor(img_tensorize(images.pop(_SCREAMING_SNAKE_CASE ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
UpperCAmelCase : str = torch.tensor([im.shape[:2] for im in images] )
UpperCAmelCase : int = self.aug(_SCREAMING_SNAKE_CASE )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
UpperCAmelCase : Any = [self.normalizer(_SCREAMING_SNAKE_CASE ) for x in images]
# now pad them to do the following operations
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.pad(_SCREAMING_SNAKE_CASE )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
UpperCAmelCase : int = torch.true_divide(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _snake_case ( UpperCamelCase : Dict , UpperCamelCase : Dict ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _snake_case ( UpperCamelCase : Any , UpperCamelCase : Tuple[int, int] ):
assert torch.isfinite(UpperCamelCase ).all(), "Box tensor contains infinite or NaN!"
UpperCAmelCase , UpperCAmelCase : str = box_size
tensor[:, 0].clamp_(min=0 , max=UpperCamelCase )
tensor[:, 1].clamp_(min=0 , max=UpperCamelCase )
tensor[:, 2].clamp_(min=0 , max=UpperCamelCase )
tensor[:, 3].clamp_(min=0 , max=UpperCamelCase )
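A minimal smoke test for `Preprocess` (a sketch: `SimpleNamespace` stands in for the detectron2-style config object this module expects, and every value below is a hypothetical placeholder — only the attribute names mirror what `Preprocess.__init__` actually reads):

```python
from types import SimpleNamespace

import torch

# hypothetical config; real values come from the model's frcnn config
cfg = SimpleNamespace(
    INPUT=SimpleNamespace(MIN_SIZE_TEST=800, MAX_SIZE_TEST=1333, FORMAT="RGB"),
    SIZE_DIVISIBILITY=0,
    PAD_VALUE=0.0,
    MODEL=SimpleNamespace(
        DEVICE="cpu",
        PIXEL_MEAN=[102.98, 115.95, 122.77],
        PIXEL_STD=[1.0, 1.0, 1.0],
    ),
)

preprocess = Preprocess(cfg)
img = torch.rand(480, 640, 3)  # HWC float tensor, so it takes the interpolate branch
images, sizes, scales_yx = preprocess(img, single_image=True)
print(images.shape, sizes, scales_yx)  # e.g. torch.Size([3, 800, 1067]), tensor([ 800, 1067]), ...
```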
| 76 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class __lowerCamelCase ( A__ ):
'''simple docstring'''
a_ : List[str] = """markuplm"""
def __init__( self : List[Any] , a_ : Optional[int]=3_05_22 , a_ : Dict=7_68 , a_ : Optional[int]=12 , a_ : Any=12 , a_ : Any=30_72 , a_ : Tuple="gelu" , a_ : List[Any]=0.1 , a_ : Tuple=0.1 , a_ : Dict=5_12 , a_ : Tuple=2 , a_ : Optional[Any]=0.02 , a_ : Tuple=1e-1_2 , a_ : List[str]=0 , a_ : List[str]=0 , a_ : Optional[Any]=2 , a_ : Dict=2_56 , a_ : int=10_24 , a_ : Tuple=2_16 , a_ : int=10_01 , a_ : List[Any]=32 , a_ : List[str]=50 , a_ : Optional[Any]="absolute" , a_ : Tuple=True , a_ : Any=None , **a_ : Tuple , ):
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
lowerCAmelCase_ : List[Any] = vocab_size
lowerCAmelCase_ : Any = hidden_size
lowerCAmelCase_ : str = num_hidden_layers
lowerCAmelCase_ : Dict = num_attention_heads
lowerCAmelCase_ : str = hidden_act
lowerCAmelCase_ : str = intermediate_size
lowerCAmelCase_ : int = hidden_dropout_prob
lowerCAmelCase_ : List[str] = attention_probs_dropout_prob
lowerCAmelCase_ : Union[str, Any] = max_position_embeddings
lowerCAmelCase_ : str = type_vocab_size
lowerCAmelCase_ : int = initializer_range
lowerCAmelCase_ : int = layer_norm_eps
lowerCAmelCase_ : Optional[Any] = position_embedding_type
lowerCAmelCase_ : Any = use_cache
lowerCAmelCase_ : Any = classifier_dropout
# additional properties
lowerCAmelCase_ : List[str] = max_depth
lowerCAmelCase_ : int = max_xpath_tag_unit_embeddings
lowerCAmelCase_ : List[Any] = max_xpath_subs_unit_embeddings
lowerCAmelCase_ : List[str] = tag_pad_id
lowerCAmelCase_ : Optional[Any] = subs_pad_id
lowerCAmelCase_ : int = xpath_unit_hidden_size
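A one-line smoke test (hypothetical usage; the defaults above correspond to the base-size MarkupLM configuration):

```python
config = MarkupLMConfig()
print(config.model_type, config.hidden_size, config.max_depth)  # markuplm 768 50
```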
| 241 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict:
"""simple docstring"""
lowerCAmelCase_ : Union[str, Any] = 0
if start < end:
lowerCAmelCase_ : Dict = randint(__UpperCamelCase , __UpperCamelCase )
lowerCAmelCase_ : List[str] = a[end]
lowerCAmelCase_ : List[str] = a[pivot]
lowerCAmelCase_ : Any = temp
lowerCAmelCase_ , lowerCAmelCase_ : Any = _in_place_partition(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
count += _in_place_quick_sort(__UpperCamelCase , __UpperCamelCase , p - 1 )
count += _in_place_quick_sort(__UpperCamelCase , p + 1 , __UpperCamelCase )
return count
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str:
"""simple docstring"""
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : Tuple = randint(__UpperCamelCase , __UpperCamelCase )
lowerCAmelCase_ : str = a[end]
lowerCAmelCase_ : List[Any] = a[pivot]
lowerCAmelCase_ : Optional[Any] = temp
lowerCAmelCase_ : Dict = start - 1
for index in range(__UpperCamelCase , __UpperCamelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowerCAmelCase_ : Dict = new_pivot_index + 1
lowerCAmelCase_ : Tuple = a[new_pivot_index]
lowerCAmelCase_ : List[Any] = a[index]
lowerCAmelCase_ : Optional[Any] = temp
lowerCAmelCase_ : Any = a[new_pivot_index + 1]
lowerCAmelCase_ : int = a[end]
lowerCAmelCase_ : str = temp
return new_pivot_index + 1, count
lowercase__ = TemporaryFile()
lowercase__ = 100 # 1000 elements are to be sorted
lowercase__ , lowercase__ = 0, 1 # mean and standard deviation
lowercase__ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
lowercase__ = np.load(outfile)
lowercase__ = len(M) - 1
lowercase__ = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
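A quick check on a small fixed input (a sketch; the exact comparison count varies between runs because pivots are chosen at random):

```python
arr = np.array([3.0, 1.0, 2.0, 5.0, 4.0])
comparisons = _in_place_quick_sort(arr, 0, len(arr) - 1)
assert (arr[:-1] <= arr[1:]).all()  # array is now sorted in place
print(comparisons)  # at least n - 1 = 4; randomized pivots change the exact count
```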
| 241 | 1 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
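Scraping Yahoo Finance like this is brittle: the `class_` selector is tied to the page's markup at the time of writing and `soup.find` returns `None` once it changes. A more defensive variant (a sketch reusing the imports above; `stock_price_safe` is a hypothetical helper, not part of the original script):

```python
def stock_price_safe(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    if div is None or div.find("span") is None:
        raise RuntimeError(f"price element not found for {symbol}; the page layout may have changed")
    return div.find("span").text
```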
| 364 |
import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""

        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
"""

        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
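Outside the test harness, the same pattern looks like the sketch below (assuming the tiny model was already cached by a previous online run; the environment variable must be set before `transformers` is imported):

```python
# Sketch: load from the local cache with networking conceptually disabled.
import os

os.environ["TRANSFORMERS_OFFLINE"] = "1"  # must be set before importing transformers

from transformers import BertConfig

config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
print("loaded from cache without hitting the network")
```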
| 75 | 0 |